# Project 6 for the University of Tulsa's CS-7863 Sci-Stat Course

# Penalized Machine Learning

# Professor: Dr. McKinney, Spring 2023

# Noah L. Schrick - 1492657

# 1. Penalized Regression and Classification

## a. Modified Ridge classification for LASSO penalties

Gradient descent (via `optim`) is used to optimize the betas.
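
`penalized_loss` and `ridge_grad` are called below but are not defined in this
document. The following is a minimal stand-in sketch, assuming a logistic
(binomial) loss with an elastic-net penalty and 0/1-coded labels; the penalty
form and both function bodies are assumptions, not the course-supplied code.

```r
# Sketch: mean negative log-likelihood of logistic regression plus an
# elastic-net penalty (alpha = 0 is ridge, alpha = 1 is LASSO);
# beta[1] is the intercept and is left unpenalized
penalized_loss <- function(X, y, beta, lam, alpha){
  X1  <- cbind(1, as.matrix(X))              # prepend an intercept column
  p   <- 1 / (1 + exp(-(X1 %*% beta)))       # fitted probabilities
  nll <- -mean(y * log(p) + (1 - y) * log(1 - p))
  pen <- lam * ((1 - alpha) / 2 * sum(beta[-1]^2) + alpha * sum(abs(beta[-1])))
  nll + pen
}

# Sketch: analytic gradient of the ridge (alpha = 0) objective above
ridge_grad <- function(X, y, beta, lam){
  X1 <- cbind(1, as.matrix(X))
  p  <- 1 / (1 + exp(-(X1 %*% beta)))
  g  <- as.vector(t(X1) %*% (p - y)) / nrow(X1)  # gradient of the mean NLL
  g[-1] <- g[-1] + lam * beta[-1]                # ridge term; skip the intercept
  g
}
```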

```r
ridge_betas <- function(X, y, beta_init=NULL, lam, alpha=0, method="BFGS"){
  if (is.null(beta_init)){ beta_init <- rep(.1, ncol(X)+1) }
  # method: BFGS, CG, Nelder-Mead
  fit <- optim(beta_init,  # starting guess
               fn = function(beta){ penalized_loss(X, y, beta, lam, alpha=alpha) },  # objective
               # the analytic gradient matches only the smooth ridge (alpha = 0)
               # penalty; for alpha > 0 let optim fall back to a numerical gradient
               gr = if (alpha == 0) function(beta){ ridge_grad(X, y, beta, lam) } else NULL,
               method = method)  #, control = list(trace = 2))
  return(list(loss = fit$value, betas = fit$par))
}

lasso_betas <- function(X, y, lam=0.01){
  # alpha = 1 selects the LASSO (L1) penalty; lam must be > 0
  # for the penalty to have any effect
  ridge_betas(X, y, beta_init=NULL, lam=lam, alpha=1, method="BFGS")
}
```

### Use npdro simulated data to test

```r
if (!require("devtools")) install.packages("devtools")
library(devtools)
# npdro is distributed on GitHub, not CRAN
if (!require("npdro")) install_github("insilico/npdro")
library(npdro)
if (!require("dplyr")) install.packages("dplyr")
library(dplyr)
```

```r
num.samples <- 300
num.variables <- 100
dataset <- npdro::createSimulation2(num.samples=num.samples,
                                    num.variables=num.variables,
                                    pct.imbalance=0.5,
                                    pct.signals=0.2,
                                    main.bias=0.5,
                                    interaction.bias=1,
                                    hi.cor=0.95,
                                    lo.cor=0.2,
                                    mix.type="main-interactionScalefree",
                                    label="class",
                                    sim.type="mixed",
                                    pct.mixed=0.5,
                                    pct.train=0.5,
                                    pct.holdout=0.5,
                                    pct.validation=0,
                                    plot.graph=F,
                                    graph.structure=NULL,
                                    verbose=T)
train <- dataset$train  # 150 x 101: 150 training samples, 100 predictors plus class
test <- dataset$holdout
validation <- dataset$validation
dataset$signal.names
colnames(train)

# split off the predictor matrices and class labels (assumed 0/1-coded) for later use
train.X <- train[, colnames(train) != "class"]
train.y <- train$class
test.X <- test[, colnames(test) != "class"]
test.y <- test$class
```

### Compare with Ridge
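
As a minimal sketch using the part (a) functions, the ridge (alpha = 0) model
can be fit on the simulated training data; lam = 0.1 is an arbitrary
illustration value, not a tuned choice.

```r
# fit the smooth ridge objective and inspect the largest coefficients
ridge.fit <- ridge_betas(train.X, train.y, lam=0.1, alpha=0)
ridge.fit$loss
names(ridge.fit$betas) <- c("intercept", colnames(train.X))
head(sort(abs(ridge.fit$betas[-1]), decreasing=TRUE), 20)  # top 20 predictors by |beta|
```
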
### Compare with Random Forest

```r
if (!require("randomForest")) install.packages("randomForest")
library(randomForest)
if (!require("ranger")) install.packages("ranger")
library(ranger)

rf_comp <- function(train){
  # randomForest with permutation importance; class ~ . keeps the class
  # column out of the predictor set
  rf <- randomForest(as.factor(class) ~ ., data=train, ntree=5000,
                     importance=T)
  print(rf)  # includes the OOB error estimate
  rf_imp <- data.frame(MeanDecreaseAccuracy = importance(rf, type=1)[, 1])
  #dplyr::arrange(rf_imp, -MeanDecreaseAccuracy)
  print(dplyr::slice_max(rf_imp, order_by=MeanDecreaseAccuracy, n=20))

  # ranger with permutation importance
  rf2 <- ranger(as.factor(class) ~ ., data=train, num.trees=5000,
                importance="permutation")
  print(rf2)  # includes the OOB prediction error
  rf2_imp <- data.frame(rf_score = rf2$variable.importance)
  print(dplyr::slice_max(rf2_imp, order_by=rf_score, n=20))

  #rftest <- predict(rf, newdata=test, type="class")
  #confusionMatrix(table(rftest, test$class))
}

rf_comp(train)
```

### Compare with glmnet

```r
if (!require("glmnet")) install.packages("glmnet")
library(glmnet)

glm_fcn <- function(train.X, train.y, alpha_p){
  # cross-validated elastic net; alpha_p = 0 is ridge, alpha_p = 1 is LASSO
  glmnet.class.model <- cv.glmnet(as.matrix(train.X), train.y, alpha=alpha_p,
                                  family="binomial", type.measure="class")
  print(glmnet.class.model$lambda.1se)
  print(glmnet.class.model$lambda.min)
  plot(glmnet.class.model)
  # coefficients at the default s = "lambda.1se"
  glmnet.class.coeffs <- predict(glmnet.class.model, type="coefficients")
  # glmnet includes an intercept, but we ignore it when ranking predictors
  glmnet.df <- data.frame(as.matrix(glmnet.class.coeffs))
  glmnet.df$abs_scores <- abs(glmnet.df[, 1])  # first column holds the coefficients
  dplyr::slice_max(glmnet.df, order_by=abs_scores, n=21)  # 21 = top 20 predictors + intercept
}
```

#### Alpha = 0 (ridge)

```r
glm_fcn(train.X, train.y, 0)
```

#### Alpha = 1 (LASSO)

```r
glm_fcn(train.X, train.y, 1)
```

## b. Repeat comparison using a graph with clusters

```r
if (!require("igraph")) install.packages("igraph")
library(igraph)
if (!require("Matrix")) install.packages("Matrix")
library(Matrix)  # for bdiag()

npc <- 25       # nodes per cluster
n_clust <- 4    # 4 clusters with 25 nodes each

# baseline: one Erdos-Renyi graph with no cluster structure
g0 <- erdos.renyi.game(npc*n_clust, 0.2)
plot(g0)
```

```r
# one Erdos-Renyi adjacency matrix per cluster
matlist <- list()
for (i in 1:n_clust){
  matlist[[i]] <- get.adjacency(erdos.renyi.game(npc, 0.2))
}

# merge the clusters into one block-diagonal adjacency matrix
mat_clust <- bdiag(matlist)

## the following cleanup may not be necessary

# check for isolated nodes (degree 0) and join each one to a random node
k <- rowSums(mat_clust)
node_vector <- seq(1, npc*n_clust)
for (i in node_vector){
  if (k[i] == 0){  # degree 0: connect to a random other node
    j <- sample(node_vector[-i], 1)
    mat_clust[i, j] <- 1
    mat_clust[j, i] <- 1
  }
}

# color nodes by cluster and plot
node_colors <- c(rep("red", npc), rep("green", npc), rep("blue", npc), rep("orange", npc))
g1 <- graph_from_adjacency_matrix(mat_clust, mode="undirected", diag=F)
plot(g1, vertex.color=node_colors)
```
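
To repeat the comparison on data simulated around this clustered graph, one
hedged sketch is to pass `g1` through `createSimulation2`'s `graph.structure`
argument (an assumption based on the `graph.structure = NULL` default seen in
part (a)) and rerun the random forest and glmnet comparisons:

```r
dataset.b <- npdro::createSimulation2(num.samples=num.samples,
                                      num.variables=num.variables,
                                      pct.imbalance=0.5, pct.signals=0.2,
                                      main.bias=0.5, interaction.bias=1,
                                      hi.cor=0.95, lo.cor=0.2,
                                      mix.type="main-interactionScalefree",
                                      label="class", sim.type="mixed",
                                      pct.mixed=0.5, pct.train=0.5,
                                      pct.holdout=0.5, pct.validation=0,
                                      plot.graph=F, graph.structure=g1,
                                      verbose=T)
train.b <- dataset.b$train
rf_comp(train.b)
glm_fcn(train.b[, colnames(train.b) != "class"], train.b$class, 0)  # ridge
glm_fcn(train.b[, colnames(train.b) != "class"], train.b$class, 1)  # LASSO
```
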

## c. Use npdro and igraph to create a knn graph

```r
my.k <- 3  # larger k gives fewer clusters
# transposing train.X gives distances between predictors;
# without the transpose, distances are between samples
npdr.nbpairs.idx <- npdro::nearestNeighbors(t(train.X),
                                            #nbd.method="multisurf", k=0,
                                            nbd.method="relieff",
                                            nbd.metric="manhattan",
                                            k=my.k)
knn.graph <- graph_from_edgelist(as.matrix(npdr.nbpairs.idx), directed=F)
knn.graph <- simplify(knn.graph)  # drop duplicate edges and self-loops
```

### Plot network

```r
plot.igraph(knn.graph, layout=layout_with_fr(knn.graph),
            vertex.color="red",
            vertex.size=3, vertex.label=NA,
            main="Manhattan, knn-graph")
```

## d. Add Laplace graph penalty
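
A minimal sketch of the graph penalty, assuming it uses the Laplacian
L = D - A of the knn predictor graph from part (c); `L.mat`, `laplace_loss`,
and `lam2` are illustrative names introduced here, not course-supplied code.

```r
# graph Laplacian of the knn predictor graph (assumes knn.graph has one
# vertex per predictor); the quadratic form t(b) %*% L %*% b penalizes
# coefficient differences between connected predictors
L.mat <- as.matrix(laplacian_matrix(knn.graph))

laplace_loss <- function(X, y, beta, lam, alpha, lam2){
  b <- beta[-1]  # the graph penalty applies to predictors, not the intercept
  penalized_loss(X, y, beta, lam, alpha) + lam2 * as.numeric(t(b) %*% L.mat %*% b)
}
```
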
### Find resulting beta coeffs
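
Continuing the assumptions above, the betas can be recovered with `optim`;
lam = 0.1 and lam2 = 0.1 are arbitrary illustration values.

```r
lap.fit <- optim(rep(.1, ncol(train.X)+1),
                 fn=function(beta) laplace_loss(train.X, train.y, beta,
                                                lam=0.1, alpha=0, lam2=0.1),
                 method="BFGS")  # no gr supplied, so optim differentiates numerically
lap.betas <- lap.fit$par
```
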
### Optimize or choose value for lambda2
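
One hedged way to choose lambda2 is a small grid search scored by the
unpenalized holdout loss (reusing `penalized_loss` with lam = 0); the grid
endpoints are arbitrary illustration values.

```r
lam2.grid <- 10^seq(-3, 1, length.out=9)
holdout.nll <- sapply(lam2.grid, function(l2){
  fit <- optim(rep(.1, ncol(train.X)+1),
               fn=function(beta) laplace_loss(train.X, train.y, beta,
                                              lam=0.1, alpha=0, lam2=l2),
               method="BFGS")
  penalized_loss(test.X, test.y, fit$par, lam=0, alpha=0)  # plain holdout NLL
})
lam2.grid[which.min(holdout.nll)]  # lambda2 with the lowest holdout loss
```
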
### Compare to a) and b)

# 2. Gradient Descent

## Write a gradient function and a descent loop with a learning-rate parameter

```r
# gradient of the Rosenbrock function f(x, y) = (a - x)^2 + b*(y - x^2)^2
grad.rosen <- function(xvec, a=2, b=100){
  x <- xvec[1]
  y <- xvec[2]
  f.x <- -2*(a-x) - 4*b*x*(y-x^2)  # df/dx
  f.y <- 2*b*(y-x^2)               # df/dy
  return(c(f.x, f.y))
}
```

```r
a <- 2
b <- 100
alpha <- .0001      # learning rate
p <- c(0, 0)        # descent direction; a true momentum variant is sketched below

xy <- c(-1.8, 3.0)  # starting guess for the solution

# plain gradient descent; many epochs are needed because the Rosenbrock
# valley is long, narrow, and nearly flat along its floor
epochs <- 1000000
for (epoch in 1:epochs){
  p <- -grad.rosen(xy, a, b)
  xy <- xy + alpha*p
}

print(xy)  # should approach the minimum at (a, a^2) = (2, 4)
```
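
The `p` above is overwritten every epoch, so no momentum actually accumulates.
For comparison, a minimal sketch of classical (heavy-ball) momentum; `mu` and
the epoch count are assumed illustration values.

```r
mu <- 0.9            # momentum coefficient (assumed value)
v  <- c(0, 0)        # velocity accumulator
xy.m <- c(-1.8, 3.0)
for (epoch in 1:200000){
  v    <- mu*v - alpha*grad.rosen(xy.m, a, b)  # decaying sum of past gradients
  xy.m <- xy.m + v
}
print(xy.m)  # should also approach (2, 4), typically in fewer epochs
```
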
## Using optim

```r
# the Rosenbrock function itself
f.rosen <- function(xvec, a=2, b=100){
  x <- xvec[1]
  y <- xvec[2]
  return((a-x)^2 + b*(y-x^2)^2)
}

sol.BFGS <- optim(par=c(-1.8, 3.0), fn=function(x){f.rosen(x, a=2, b=100)},
                  gr=function(x){grad.rosen(x, a=2, b=100)}, method="BFGS")
sol.BFGS$par  # should also be ~(2, 4)
```