# Project 6 for the University of Tulsa's CS-7863 Sci-Stat Course

# Penalized Machine Learning

# Professor: Dr. McKinney, Spring 2023

# Noah L. Schrick - 1492657


# 1. Penalized Regression and Classification

## a. Modified Ridge classification for LASSO penalties

penalized_loss <- function(X, y, beta, lam, alpha=0){
  # y needs to be 0/1
  # beta: regression coefficients (intercept first)
  # lam: penalty strength, lam=0 gives un-penalized logistic regression
  # alpha = 0 ridge penalty, alpha = 1 lasso penalty
  m <- nrow(X)
  Xtilde <- as.matrix(cbind(intercept=rep(1,m), X))
  z <- Xtilde %*% beta   # column vector of linear predictors
  yhat <- 1/(1+exp(-z))
  yclass <- as.numeric(y)
  # 1. logistic un-penalized loss (mean negative log-likelihood)
  penal.loss <- sum(-yclass*log(yhat) - (1-yclass)*log(1-yhat))/m +
    # 2. elastic-net penalty, lam=0 removes penalty
    lam*((1-alpha)*sum(beta*beta)/2 +   # ridge
           alpha*sum(abs(beta)))        # lasso
  return(penal.loss)
}
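
# Quick sanity check of penalized_loss on tiny simulated data (illustrative
# only; X.toy/y.toy are throwaway names, not part of the assignment data).
# With beta = 0 every yhat is 0.5, so the un-penalized loss should be log(2).
set.seed(1)
X.toy <- matrix(rnorm(20), nrow=10, ncol=2)
y.toy <- rbinom(10, 1, 0.5)
penalized_loss(X.toy, y.toy, beta=rep(0, 3), lam=0)  # ~0.6931 = log(2)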

ridge_grad <- function(X, y, beta, lam){
  # gradient of penalized_loss for the ridge case (alpha=0)
  # y needs to be 0/1
  # also works for non-penalized logistic regression if lam=0
  m <- nrow(X)
  p <- ncol(X)
  Xtilde <- as.matrix(cbind(intercept=rep(1,m), X))
  z <- Xtilde %*% beta   # column vector of linear predictors
  yhat <- 1/(1+exp(-z))
  yclass <- as.numeric(y)
  grad <- rep(0, p+1)
  for (a in seq(1, p+1)){
    beta_a <- beta[a]  # input beta from previous descent step
    Loss.grad <- sum(-yclass*(1-yhat)*Xtilde[,a] +
                       (1-yclass)*yhat*Xtilde[,a])
    # scale only the data term by 1/m so the gradient matches penalized_loss
    grad[a] <- Loss.grad/m + lam*beta_a
  } # end for loop
  return(grad)
}
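
# Hedged sanity check (not part of the assignment): compare the analytic
# gradient to a central finite-difference approximation of penalized_loss on
# the toy data above, using an arbitrary lam.
beta.toy <- rep(0.1, 3)
num.grad <- sapply(1:3, function(j){
  h <- 1e-6; e <- rep(0, 3); e[j] <- h
  (penalized_loss(X.toy, y.toy, beta.toy + e, lam=0.5) -
     penalized_loss(X.toy, y.toy, beta.toy - e, lam=0.5)) / (2*h)
})
cbind(analytic=ridge_grad(X.toy, y.toy, beta.toy, lam=0.5), numeric=num.grad)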

### gradient descent to optimize betas
ridge_betas <- function(X, y, beta_init=NULL, lam, alpha=0, method="BFGS"){
  if (is.null(beta_init)){beta_init <- rep(.1, ncol(X)+1)}
  # method: BFGS, CG, Nelder-Mead
  fit <- optim(beta_init,                                                       # initial guess
               fn=function(beta){penalized_loss(X, y, beta, lam, alpha=alpha)}, # objective
               gr=function(beta){ridge_grad(X, y, beta, lam)},                  # gradient (ridge part)
               method = method) #, control=list(trace=2))
  return(list(loss=fit$value, betas=fit$par))
}

lasso_betas <- function(X, y){
  # fit un-penalized logistic regression (lam=0); the lasso soft-thresholding
  # step is applied to the returned betas afterwards
  ridge_betas(X, y, beta_init=NULL, lam=0, alpha=0, method="BFGS")
}

### Use npdro simulated data to test
if (!require("devtools")) install.packages("devtools")
library(devtools)
if (!require("npdro")) install_github("insilico/npdro")  # npdro is on GitHub, not CRAN
library(npdro)
if (!require("dplyr")) install.packages("dplyr")
library(dplyr)

num.samples <- 300
num.variables <- 100
dataset <- npdro::createSimulation2(num.samples=num.samples,
                                    num.variables=num.variables,
                                    pct.imbalance=0.5,
                                    pct.signals=0.2,
                                    main.bias=0.5,
                                    interaction.bias=1,
                                    hi.cor=0.95,
                                    lo.cor=0.2,
                                    mix.type="main-interactionScalefree",
                                    label="class",
                                    sim.type="mixed",
                                    pct.mixed=0.5,
                                    pct.train=0.5,
                                    pct.holdout=0.5,
                                    pct.validation=0,
                                    plot.graph=F,
                                    graph.structure=NULL,
                                    verbose=T)
train <- dataset$train # 150 x 101
test <- dataset$holdout
validation <- dataset$validation
dataset$signal.names
colnames(train)

# separate the class vector from the predictor data matrix
train.X <- train[, -which(colnames(train) == "class")]
train.y <- train[, "class"]
train.y.01 <- as.numeric(train.y) - 1

lambda <- 0
unpen_beta <- lasso_betas(train.X, train.y.01)
# lasso soft-thresholding of the un-penalized betas; modify the vector by
# index, since assigning to a loop variable would not change it
for (i in seq_along(unpen_beta$betas)){
  b <- unpen_beta$betas[i]
  if (abs(b) <= lambda){
    unpen_beta$betas[i] <- 0
  } else if (b > lambda){
    unpen_beta$betas[i] <- b - lambda
  } else {
    unpen_beta$betas[i] <- b + lambda
  }
}
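
# Illustrative vectorized equivalent of the soft-thresholding loop above;
# soft() is a helper name introduced here, not part of the assignment code.
soft <- function(b, l) sign(b) * pmax(abs(b) - l, 0)
# unpen_beta$betas <- soft(unpen_beta$betas, lambda)  # same result as the loop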

lasso.df <- data.frame(att=c("intercept", colnames(train.X)),
                       scores=unpen_beta$betas,
                       abs_scores=abs(unpen_beta$betas))
dplyr::slice_max(lasso.df, order_by=abs_scores, n=20)

### Compare with Ridge
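# A minimal sketch of the ridge comparison this heading calls for; the lam
# value is an arbitrary illustration, not a tuned choice from the assignment.
ridge_beta <- ridge_betas(train.X, train.y.01, lam=0.1, alpha=0, method="BFGS")
ridge.df <- data.frame(att=c("intercept", colnames(train.X)),
                       scores=ridge_beta$betas,
                       abs_scores=abs(ridge_beta$betas))
dplyr::slice_max(ridge.df, order_by=abs_scores, n=20)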

### Compare with Random Forest
if (!require("randomForest")) install.packages("randomForest")
library(randomForest)
if (!require("ranger")) install.packages("ranger")
library(ranger)

rf_comp <- function(train){
  # use the class column as the response so it is not also a predictor
  rf <- randomForest(as.factor(class) ~ ., data=train, ntree=5000,
                     importance=T)
  print(rf) # shows the OOB error estimate
  # ranger also exports importance(), so qualify the randomForest version
  rf_imp <- data.frame(rf_score=randomForest::importance(rf, type=1))
  print(dplyr::slice_max(rf_imp, order_by=MeanDecreaseAccuracy, n=20))

  rf2 <- ranger(as.factor(class) ~ ., data=train, num.trees=5000,
                importance="permutation")
  print(rf2) # shows the OOB prediction error
  rf2_imp <- data.frame(rf_score=rf2$variable.importance)
  print(dplyr::slice_max(rf2_imp, order_by=rf_score, n=20))

  #rftest <- predict(rf, newdata=test, type="class")
  #confusionMatrix(table(rftest,test$class))
}

rf_comp(train)

### Compare with glmnet
if (!require("glmnet")) install.packages("glmnet")
library(glmnet)

glm_fcn <- function(train.X, train.y, alpha_p){
  # alpha_p = 0: ridge, alpha_p = 1: lasso
  glmnet.class.model <- cv.glmnet(as.matrix(train.X), train.y, alpha=alpha_p,
                                  family="binomial", type.measure="class")
  glmnet.class.model$lambda.1se
  glmnet.class.model$lambda.min
  plot(glmnet.class.model)
  # coefficients at lambda.1se (the default s for cv.glmnet)
  glmnet.class.coeffs <- predict(glmnet.class.model, type="coefficients")

  glmnet.df <- data.frame(as.matrix(glmnet.class.coeffs))
  # glmnet includes an intercept row; the first column holds the coefficients
  glmnet.df$abs_scores <- abs(glmnet.df[[1]])
  dplyr::slice_max(glmnet.df, order_by=abs_scores, n=21)
}

#### Alpha = 0 (ridge)
glm_fcn(train.X, train.y, 0)

#### Alpha = 1 (lasso)
glm_fcn(train.X, train.y, 1)

## b. Repeat comparison using a graph with clusters
if (!require("igraph")) install.packages("igraph")
library(igraph)
if (!require("Matrix")) install.packages("Matrix")
library(Matrix) # bdiag

npc <- 25     # nodes per cluster
n_clust <- 4  # 4 clusters with 25 nodes each

# no clusters
g0 <- erdos.renyi.game(npc*n_clust, 0.2)
plot(g0)

# build one Erdos-Renyi adjacency matrix per cluster
matlist <- list()
for (i in 1:n_clust){
  matlist[[i]] <- get.adjacency(erdos.renyi.game(npc, 0.2))
}

# merge clusters into one block-diagonal adjacency matrix
mat_clust <- bdiag(matlist)

## the following fix-up might not be necessary
# check for loner nodes connected to nothing, and join them to a random node
k <- rowSums(mat_clust)
node_vector <- seq(1, npc*n_clust)
for (i in node_vector){
  if (k[i] == 0){ # if degree is 0, connect to a random other node
    j <- sample(node_vector[-i], 1)
    mat_clust[i,j] <- 1
    mat_clust[j,i] <- 1
  }
}
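
# sanity check (illustrative): every node should now have at least one edge
all(rowSums(mat_clust) > 0)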

node_colors <- c(rep("red",npc), rep("green",npc), rep("blue",npc), rep("orange",npc))
g1 <- graph_from_adjacency_matrix(mat_clust, mode="undirected", diag=F)
plot(g1, vertex.color=node_colors)

### Dataset with g1
dataset.graph <- npdro::createSimulation2(num.samples=num.samples,
                                          num.variables=num.variables,
                                          pct.imbalance=0.5,
                                          pct.signals=0.2,
                                          main.bias=0.5,
                                          interaction.bias=1,
                                          hi.cor=0.95,
                                          lo.cor=0.2,
                                          mix.type="main-interactionScalefree",
                                          label="class",
                                          sim.type="mixed",
                                          pct.mixed=0.5,
                                          pct.train=0.5,
                                          pct.holdout=0.5,
                                          pct.validation=0,
                                          plot.graph=F,
                                          graph.structure=g1,
                                          verbose=T)

train.graph <- dataset.graph$train # 150 x 101
test.graph <- dataset.graph$holdout
validation.graph <- dataset.graph$validation
dataset.graph$signal.names
colnames(train.graph)

# separate the class vector from the predictor data matrix
train.graph.X <- train.graph[, -which(colnames(train.graph) == "class")]
train.graph.y <- train.graph[, "class"]
train.graph.y.01 <- as.numeric(train.graph.y) - 1
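
# Hedged sketch of repeating the part (a) comparison on the graph-structured
# data, re-using the helpers defined above (illustrative, not a tuned analysis).
unpen_beta.graph <- lasso_betas(train.graph.X, train.graph.y.01)
lasso.graph.df <- data.frame(att=c("intercept", colnames(train.graph.X)),
                             scores=unpen_beta.graph$betas,
                             abs_scores=abs(unpen_beta.graph$betas))
dplyr::slice_max(lasso.graph.df, order_by=abs_scores, n=20)
rf_comp(train.graph)
glm_fcn(train.graph.X, train.graph.y, 0)  # ridge
glm_fcn(train.graph.X, train.graph.y, 1)  # lasso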

## c. Use npdro and igraph to create a knn graph
my.k <- 3 # larger k gives a more connected graph (fewer clusters)
# transposing train.X computes distances between predictors;
# without the transpose it would be distances between samples
npdr.nbpairs.idx <- npdro::nearestNeighbors(t(train.X),
                                            #nbd.method="multisurf", k=0,
                                            nbd.method="relieff",
                                            nbd.metric="manhattan",
                                            k=my.k)
knn.graph <- graph_from_edgelist(as.matrix(npdr.nbpairs.idx),
                                 directed=F)
knn.graph <- simplify(knn.graph) # remove self-loops and duplicate edges

### Plot network
plot.igraph(knn.graph, layout=layout_with_fr(knn.graph),
            vertex.color="red",
            vertex.size=3, vertex.label=NA,
            main="Manhattan, knn-graph")
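
# Illustrative check of the "fewer clusters with larger k" note above: count
# the connected components of the knn graph (components() is from igraph).
components(knn.graph)$no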

## d. Add Laplace graph penalty

### Find resulting beta coeffs
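# A minimal sketch (not a worked solution) of adding a graph-Laplacian penalty
# lambda2 * t(beta) %*% L %*% beta to penalized_loss, using the predictor
# knn.graph from part (c). It assumes knn.graph has one vertex per column of
# train.X; laplace_loss, lambda2, and laplace_fit are illustrative names.
L.mat <- as.matrix(igraph::laplacian_matrix(knn.graph))
laplace_loss <- function(X, y, beta, lam, alpha, lam2, Lmat){
  b <- beta[-1] # exclude the intercept from the graph penalty
  penalized_loss(X, y, beta, lam, alpha) + lam2 * as.numeric(t(b) %*% Lmat %*% b)
}
lambda2 <- 0.1 # placeholder value; see the lambda2 subsection below
laplace_fit <- optim(rep(.1, ncol(train.X)+1),
                     fn=function(beta){laplace_loss(train.X, train.y.01, beta,
                                                    lam=0, alpha=0,
                                                    lam2=lambda2, Lmat=L.mat)},
                     method="BFGS")
head(sort(abs(laplace_fit$par), decreasing=TRUE), 10)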

### Optimize or choose value for lambda2

### Compare to a) and b)

# 2. Gradient Descent

## Write fn with learning param
# gradient of the Rosenbrock function f(x,y) = (a-x)^2 + b*(y-x^2)^2
grad.rosen <- function(xvec, a=2, b=100){
  x <- xvec[1]
  y <- xvec[2]
  f.x <- -2*(a-x) - 4*b*x*(y-x^2)
  f.y <- 2*b*(y-x^2)
  return(c(f.x, f.y))
}
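
# Quick check (illustrative): the gradient should be (0, 0) at the known
# minimum (a, a^2) = (2, 4).
grad.rosen(c(2, 4))  # c(0, 0)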

a <- 2
b <- 100
alpha <- .0001 # learning rate
p <- c(0, 0)   # descent direction (re-used as the start for the momentum sketch below)

xy <- c(-1.8, 3.0) # starting guess for the solution

# gradient descent with a fixed learning rate
epochs <- 1000000
for (epoch in 1:epochs){
  p <- -grad.rosen(xy, a, b)
  xy <- xy + alpha*p
}

print(xy) # should be close to the minimum at (2, 4)
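
# Hedged sketch (not required by the assignment): the same descent with
# classical momentum, re-using the p vector idea above; gamma and the epoch
# count are illustrative choices.
gamma <- 0.9
p <- c(0, 0)
xy.m <- c(-1.8, 3.0)
for (epoch in 1:100000){
  p <- gamma*p - alpha*grad.rosen(xy.m, a, b)
  xy.m <- xy.m + p
}
print(xy.m) # should also approach (2, 4)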

# Using optim:
f.rosen <- function(xvec, a=2, b=100){
  x <- xvec[1]
  y <- xvec[2]
  return((a-x)^2 + b*(y-x^2)^2)
}

sol.BFGS <- optim(par=c(-1.8, 3.0), fn=function(x){f.rosen(x, a=2, b=100)},
                  gr=function(x){grad.rosen(x, a=2, b=100)}, method="BFGS")
sol.BFGS$par