Splitting out to separate function files
parent f755ec2edc
commit c2cd91b766
@@ -5,114 +5,12 @@
# 1. Penalized Regression and Classification

## a. Modified Ridge classification for LASSO penalties
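For reference, the objective coded in `penalized_loss()` below is the average logistic negative log-likelihood plus an elastic-net style penalty, with `alpha` interpolating between the ridge and lasso terms (a sketch of the intended form, written out for clarity):

$$
L(\beta) \;=\; \frac{1}{m}\sum_{i=1}^{m}\Big[-y_i\log\hat{y}_i-(1-y_i)\log(1-\hat{y}_i)\Big]
\;+\; \lambda\Big(\tfrac{1-\alpha}{2}\sum_j\beta_j^2 \;+\; \alpha\sum_j|\beta_j|\Big),
\qquad \hat{y}_i = \frac{1}{1+e^{-\tilde{x}_i^\top\beta}}
$$

where $\tilde{x}_i$ is the $i$-th row of the design matrix with an intercept column prepended.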
penalized_loss <- function(X, y, beta, lam, alpha=0){
  # y needs to be 0/1
  # beta: regression coefficients
  # lam: penalty, lam=0 un-penalized logistic regression
  # alpha = 0 ridge penalty, alpha = 1 lasso penalty
  m <- nrow(X)
  Xtilde <- as.matrix(cbind(intercept=rep(1,m), X))
  cnames <- colnames(Xtilde)
  z <- Xtilde %*% beta # column vector
  yhat <- 1/(1+exp(-z))
  yclass <- as.numeric(y)
  # 1. logistic unpenalized loss
  penal.loss <- sum(-yclass*log(yhat) - (1-yclass)*log(1-yhat))/m +
    # 2. penalty, lam=0 removes penalty
    lam*((1-alpha)*sum(beta*beta)/2 + # ridge
           alpha*sum(abs(beta)))      # lasso
  return(penal.loss)
}

ridge_grad <- function(X, y, beta, lam){
  # y needs to be 0/1
  # also works for non-penalized logistic regression if lam=0
  # note: ridge-only gradient; the lasso term is not differentiable at 0
  m <- nrow(X)
  p <- ncol(X)
  Xtilde <- as.matrix(cbind(intercept=rep(1,m), X))
  cnames <- colnames(Xtilde)
  z <- Xtilde %*% beta # column vector
  yhat <- 1/(1+exp(-z))
  yclass <- as.numeric(y)
  grad <- rep(0,p+1)
  for (a in seq(1,p+1)){
    beta_a <- beta[a] # input beta from previous descent step
    Loss.grad <- sum(-yclass*(1-yhat)*Xtilde[,a] +
                       (1-yclass)*yhat*Xtilde[,a])
    grad[a] <- Loss.grad + lam*beta_a
  } # end for loop
  grad <- grad/m
  return(grad)
}

### gradient descent to optimize betas
ridge_betas <- function(X, y, beta_init=NULL, lam, alpha=0, method="BFGS"){
  if (is.null(beta_init)){beta_init <- rep(.1, ncol(X)+1)}
  # method: BFGS, CG, Nelder-Mead
  # alpha is kept at 0 here because ridge_grad supplies only the ridge gradient
  no_penalty_cg <- optim(beta_init, # guess
                         fn=function(beta){penalized_loss(X, y, beta, lam, alpha=0)}, # objective
                         gr=function(beta){ridge_grad(X, y, beta, lam)}, # gradient
                         method = method) #, control= list(trace = 2))
  return(list(loss=no_penalty_cg$value, betas = no_penalty_cg$par))
}

lasso_betas <- function(X,y){
  ridge_betas(X, y, beta_init=NULL, lam=0, alpha=0, method="BFGS")
}
source("Schrick-Noah_Ridge-LASSO-Regression.R")

### Use npdro simulated data to test
if (!require("devtools")) install.packages("devtools")
library(devtools)
install_github("insilico/npdro")
if (!require("npdro")) install.packages("npdro")
library(npdro)
if (!require("dplyr")) install.packages("dplyr")
library(dplyr)

num.samples <- 300
num.variables <- 100
dataset <- npdro::createSimulation2(num.samples=num.samples,
                                    num.variables=num.variables,
                                    pct.imbalance=0.5,
                                    pct.signals=0.2,
                                    main.bias=0.5,
                                    interaction.bias=1,
                                    hi.cor=0.95,
                                    lo.cor=0.2,
                                    mix.type="main-interactionScalefree",
                                    label="class",
                                    sim.type="mixed",
                                    pct.mixed=0.5,
                                    pct.train=0.5,
                                    pct.holdout=0.5,
                                    pct.validation=0,
                                    plot.graph=F,
                                    graph.structure = NULL,
                                    verbose=T)
train <- dataset$train # 150x101
test <- dataset$holdout
validation <- dataset$validation
dataset$signal.names
colnames(train)

# separate the class vector from the predictor data matrix
train.X <- train[, -which(colnames(train) == "class")]
train.y <- train[, "class"]
train.y.01 <- as.numeric(train.y)-1

lambda <- 0
unpen_beta <- lasso_betas(train.X, train.y) # penalized_loss expects 0/1 y; use train.y.01 here if class is a factor
for(a in seq_along(unpen_beta$betas)){
  beta <- unpen_beta$betas[a]
  if(abs(beta) <= lambda){
    beta <- 0
  } else if (beta > lambda){
    beta <- beta - lambda
  } else {
    beta <- beta + lambda
  }
  unpen_beta$betas[a] <- beta # write the soft-thresholded value back
}
source("Schrick-Noah_Simulated-Data.R")
bundled_data <- create_data()
# bundled_data$train.X = train.X

lasso.df <- data.frame(att=c("intercept", colnames(train.X)),
                       scores=unpen_beta$betas,
@@ -122,53 +20,11 @@ dplyr::slice_max(lasso.df,order_by=abs_scores,n=20)
### Compare with Ridge

### Compare with Random Forest
if (!require("randomForest")) install.packages("randomForest")
library(randomForest)
if (!require("ranger")) install.packages("ranger")
library(ranger)

rf_comp <- function(train){
  rf <- randomForest(as.factor(train$class) ~ ., data=train, ntree=5000,
                     importance=T)
  print(rf) # shows the OOB error estimate
  detach("package:ranger", unload=TRUE)
  rf_imp <- data.frame(rf_score=importance(rf, type=1)) # Cannot do if ranger is loaded
  #dplyr::arrange(rf_imp,-MeanDecreaseAccuracy)
  print(dplyr::slice_max(rf_imp, order_by=MeanDecreaseAccuracy, n=20))

  library(ranger)
  rf2 <- ranger(as.factor(train$class) ~ ., data=train, num.trees=5000,
                importance="permutation")
  print(rf2) # shows the OOB prediction error
  rf2_imp <- data.frame(rf_score=rf2$variable.importance)
  #dplyr::arrange(rf2_imp,-rf_score)
  print(dplyr::slice_max(rf2_imp, order_by=rf_score, n=20))

  #rftest <- predict(rf, newdata=test, type="class")
  #confusionMatrix(table(rftest,test$class))
}

source("Schrick-Noah_Random-Forest.R")
rf_comp(train)
### Compare with glmnet
if (!require("glmnet")) install.packages("glmnet")
library(glmnet)

glm_fcn <- function(train.X, train.y, alpha_p){
  glmnet.class.model <- cv.glmnet(as.matrix(train.X), train.y, alpha=alpha_p,
                                  family="binomial", type.measure="class")
  glmnet.class.model$lambda.1se
  glmnet.class.model$lambda.min
  plot(glmnet.class.model)
  glmnet.class.coeffs <- predict(glmnet.class.model, type="coefficients")
  #glmnet.cc.coeffs # maybe 3 is most important, Excess kurtosis
  model.class.terms <- colnames(train.X) # glmnet includes an intercept but we are going to ignore it
  #nonzero.glmnet.qtrait.coeffs <- model.qtrait.terms[glmnet.qtrait.coeffs@i[which(glmnet.qtrait.coeffs@i!=0)]] # skip intercept if there, 0-based counting

  glmnet.df <- data.frame(as.matrix(glmnet.class.coeffs))
  glmnet.df$abs_scores <- abs(glmnet.df$lambda.1se)
  dplyr::slice_max(glmnet.df, order_by=abs_scores, n=21)
}
source("Schrick-Noah_glmnet.R")

#### Alpha = 0
glm_fcn(train.X, train.y, 0)
25  Schrick-Noah_Random-Forest.R  Normal file
@@ -0,0 +1,25 @@
if (!require("randomForest")) install.packages("randomForest")
library(randomForest)
if (!require("ranger")) install.packages("ranger")
library(ranger)

rf_comp <- function(train){
  rf <- randomForest(as.factor(train$class) ~ ., data=train, ntree=5000,
                     importance=T)
  print(rf) # shows the OOB error estimate
  detach("package:ranger", unload=TRUE)
  rf_imp <- data.frame(rf_score=importance(rf, type=1)) # Cannot do if ranger is loaded
  #dplyr::arrange(rf_imp,-MeanDecreaseAccuracy)
  print(dplyr::slice_max(rf_imp, order_by=MeanDecreaseAccuracy, n=20))

  library(ranger)
  rf2 <- ranger(as.factor(train$class) ~ ., data=train, num.trees=5000,
                importance="permutation")
  print(rf2) # shows the OOB prediction error
  rf2_imp <- data.frame(rf_score=rf2$variable.importance)
  #dplyr::arrange(rf2_imp,-rf_score)
  print(dplyr::slice_max(rf2_imp, order_by=rf_score, n=20))

  #rftest <- predict(rf, newdata=test, type="class")
  #confusionMatrix(table(rftest,test$class))
}
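# Minimal usage sketch (left commented so sourcing this file has no side effects):
# assumes the helpers in Schrick-Noah_Simulated-Data.R, whose create_data() returns a
# list with a `train` data frame containing the `class` column that rf_comp() expects.
# source("Schrick-Noah_Simulated-Data.R")
# bundled_data <- create_data()
# rf_comp(bundled_data$train)   # prints top-20 importances from randomForest and ranger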
71  Schrick-Noah_Ridge-LASSO-Regression.R  Normal file
@@ -0,0 +1,71 @@
penalized_loss <- function(X, y, beta, lam, alpha=0){
  # y needs to be 0/1
  # beta: regression coefficients
  # lam: penalty, lam=0 un-penalized logistic regression
  # alpha = 0 ridge penalty, alpha = 1 lasso penalty
  m <- nrow(X)
  Xtilde <- as.matrix(cbind(intercept=rep(1,m), X))
  cnames <- colnames(Xtilde)
  z <- Xtilde %*% beta # column vector
  yhat <- 1/(1+exp(-z))
  yclass <- as.numeric(y)
  # 1. logistic unpenalized loss
  penal.loss <- sum(-yclass*log(yhat) - (1-yclass)*log(1-yhat))/m +
    # 2. penalty, lam=0 removes penalty
    lam*((1-alpha)*sum(beta*beta)/2 + # ridge
           alpha*sum(abs(beta)))      # lasso
  return(penal.loss)
}

ridge_grad <- function(X, y, beta, lam){
  # y needs to be 0/1
  # also works for non-penalized logistic regression if lam=0
  # note: ridge-only gradient; the lasso term is not differentiable at 0
  m <- nrow(X)
  p <- ncol(X)
  Xtilde <- as.matrix(cbind(intercept=rep(1,m), X))
  cnames <- colnames(Xtilde)
  z <- Xtilde %*% beta # column vector
  yhat <- 1/(1+exp(-z))
  yclass <- as.numeric(y)
  grad <- rep(0,p+1)
  for (a in seq(1,p+1)){
    beta_a <- beta[a] # input beta from previous descent step
    Loss.grad <- sum(-yclass*(1-yhat)*Xtilde[,a] +
                       (1-yclass)*yhat*Xtilde[,a])
    grad[a] <- Loss.grad + lam*beta_a
  } # end for loop
  grad <- grad/m
  return(grad)
}
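# Optional sanity check (a sketch, not called anywhere): with lam=0 the analytic
# gradient from ridge_grad() should match a central finite-difference gradient of
# penalized_loss() on small fake data. Helper name and fake data are illustrative only.
check_grad_lam0 <- function(seed=1, eps=1e-6){
  set.seed(seed)
  X <- matrix(rnorm(20*3), nrow=20, ncol=3)
  y <- rbinom(20, 1, 0.5)
  beta <- rnorm(ncol(X)+1)
  num.grad <- sapply(seq_along(beta), function(a){
    bp <- beta; bm <- beta
    bp[a] <- bp[a] + eps
    bm[a] <- bm[a] - eps
    (penalized_loss(X, y, bp, lam=0) - penalized_loss(X, y, bm, lam=0))/(2*eps)
  })
  cbind(analytic=ridge_grad(X, y, beta, lam=0), numeric=num.grad)
}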

### gradient descent to optimize betas
ridge_betas <- function(X, y, beta_init=NULL, lam, alpha=0, method="BFGS"){
  if (is.null(beta_init)){beta_init <- rep(.1, ncol(X)+1)}
  # method: BFGS, CG, Nelder-Mead
  # alpha is kept at 0 here because ridge_grad supplies only the ridge gradient
  no_penalty_cg <- optim(beta_init, # guess
                         fn=function(beta){penalized_loss(X, y, beta, lam, alpha=0)}, # objective
                         gr=function(beta){ridge_grad(X, y, beta, lam)}, # gradient
                         method = method) #, control= list(trace = 2))
  return(list(loss=no_penalty_cg$value, betas = no_penalty_cg$par))
}
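# Quick cross-check (a sketch, not called anywhere): with lam=0, ridge_betas() is
# plain logistic regression, so on small fake data its coefficients should roughly
# agree with glm(). Helper name and fake data are illustrative only.
compare_with_glm <- function(seed=2){
  set.seed(seed)
  X <- data.frame(x1=rnorm(100), x2=rnorm(100))
  y <- rbinom(100, 1, 1/(1+exp(-(0.5 + X$x1 - 0.7*X$x2))))
  ours <- ridge_betas(X, y, lam=0)$betas
  glm.fit <- glm(y ~ x1 + x2, data=cbind(y=y, X), family=binomial)
  cbind(optim_betas=ours, glm_betas=as.numeric(coef(glm.fit)))
}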

# Regression coeffs for LASSO (lam=0 gives the unpenalized logistic fit,
# which is then soft-thresholded below)
lasso_betas <- function(X,y){
  ridge_betas(X, y, beta_init=NULL, lam=0, alpha=0, method="BFGS")
}

# Adjust betas: soft-threshold the unpenalized coefficients by lambda
unpen_coeff <- function(X, y, lambda=0){
  unpen_beta <- lasso_betas(X, y)
  betas <- unpen_beta$betas
  for(a in seq_along(betas)){
    if(abs(betas[a]) <= lambda){
      betas[a] <- 0
    } else if (betas[a] > lambda){
      betas[a] <- betas[a] - lambda
    } else {
      betas[a] <- betas[a] + lambda
    }
  }
  return(betas)
}
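# Illustration (a sketch) of the soft-threshold rule used above: values within
# lambda of zero are set to zero, everything else is shrunk toward zero by lambda.
soft_threshold_demo <- function(betas=c(-0.30, -0.02, 0.00, 0.04, 0.75), lambda=0.05){
  ifelse(abs(betas) <= lambda, 0,
         ifelse(betas > lambda, betas - lambda, betas + lambda))
}
# soft_threshold_demo() returns -0.25, 0, 0, 0, 0.70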
54  Schrick-Noah_Simulated-Data.R  Normal file
@@ -0,0 +1,54 @@
if (!require("devtools")) install.packages("devtools")
library(devtools)
install_github("insilico/npdro")
if (!require("npdro")) install.packages("npdro")
library(npdro)
if (!require("dplyr")) install.packages("dplyr")
library(dplyr)

create_data <- function(num.samples=300, num.variables=100,
                        pct.imbalance=0.5, pct.signals=0.2,
                        main.bias=0.5, interaction.bias=1,
                        hi.cor=0.95, lo.cor=0.2,
                        mix.type="main-interactionScalefree",
                        label="class", sim.type="mixed",
                        pct.mixed=0.5, pct.train=0.5,
                        pct.holdout=0.5, pct.validation=0,
                        plot.graph=F, graph.structure = NULL,
                        verbose=T){

  dataset <- npdro::createSimulation2(num.samples=num.samples,
                                      num.variables=num.variables,
                                      pct.imbalance=pct.imbalance,
                                      pct.signals=pct.signals,
                                      main.bias=main.bias,
                                      interaction.bias=interaction.bias,
                                      hi.cor=hi.cor,
                                      lo.cor=lo.cor,
                                      mix.type=mix.type,
                                      label=label,
                                      sim.type=sim.type,
                                      pct.mixed=pct.mixed,
                                      pct.train=pct.train,
                                      pct.holdout=pct.holdout,
                                      pct.validation=pct.validation,
                                      plot.graph=plot.graph,
                                      graph.structure = graph.structure,
                                      verbose=verbose)

  train <- dataset$train # 150x101
  test <- dataset$holdout
  validation <- dataset$validation
  dataset$signal.names
  colnames(train)

  # separate the class vector from the predictor data matrix
  train.X <- train[, -which(colnames(train) == "class")]
  train.y <- train[, "class"]
  train.y.01 <- as.numeric(train.y)-1

  return(list(train=train, test=test, train.X=train.X, train.y=train.y,
              validation=validation, train.y.01=train.y.01))
}
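# Example usage (a sketch, left commented so sourcing this file has no side effects):
# bundled_data <- create_data()
# dim(bundled_data$train.X)     # 150 x 100 with the defaults (class column removed)
# table(bundled_data$train.y)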
18  Schrick-Noah_glmnet.R  Normal file
@@ -0,0 +1,18 @@
if (!require("glmnet")) install.packages("glmnet")
library(glmnet)

glm_fcn <- function(train.X, train.y, alpha_p){
  glmnet.class.model <- cv.glmnet(as.matrix(train.X), train.y, alpha=alpha_p,
                                  family="binomial", type.measure="class")
  glmnet.class.model$lambda.1se
  glmnet.class.model$lambda.min
  plot(glmnet.class.model)
  glmnet.class.coeffs <- predict(glmnet.class.model, type="coefficients")
  #glmnet.cc.coeffs # maybe 3 is most important, Excess kurtosis
  model.class.terms <- colnames(train.X) # glmnet includes an intercept but we are going to ignore it
  #nonzero.glmnet.qtrait.coeffs <- model.qtrait.terms[glmnet.qtrait.coeffs@i[which(glmnet.qtrait.coeffs@i!=0)]] # skip intercept if there, 0-based counting

  glmnet.df <- data.frame(as.matrix(glmnet.class.coeffs))
  glmnet.df$abs_scores <- abs(glmnet.df$lambda.1se)
  dplyr::slice_max(glmnet.df, order_by=abs_scores, n=21)
}
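# Example usage (a sketch; train.X and train.y as in the main analysis file):
# glm_fcn(train.X, train.y, 0)   # alpha = 0: ridge comparison
# glm_fcn(train.X, train.y, 1)   # alpha = 1: lasso comparison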