# Homework 6 for the University of Tulsa's CS-7863 Sci-Stat Course
# Penalized Machine Learning
# Professor: Dr. McKinney, Spring 2023
# Noah L. Schrick - 1492657

if (!require("data.table")) install.packages("data.table")
if (!require("igraph")) install.packages("igraph")

library(data.table)
library(igraph)  # graph_from_edgelist, cluster_louvain, laplacian_matrix, etc.
# dplyr and npdro are called below with explicit :: and must also be installed
# 1. Penalized Regression and Classification

## a. Modified Ridge classification for LASSO penalties
source("Schrick-Noah_Ridge-LASSO-Regression.R")

### Use npdro simulated data to test
source("Schrick-Noah_Simulated-Data.R")
bundled_data <- create_data()
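# Quick sanity check on the simulated data (sketch; uses only the train.X / train.y
# slots that the comparison below already relies on).
dim(bundled_data$train.X)    # predictors: samples x attributes
table(bundled_data$train.y)  # class balance of the outcome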
run_comparison <- function(bundled_data){
  ### LASSO
  unpen_beta <- lasso_coeff(bundled_data$train.X, bundled_data$train.y)
  lasso.df <- data.frame(att=c("intercept", colnames(bundled_data$train.X)),
                         scores=unpen_beta$betas,
                         abs_scores=abs(unpen_beta$betas))
  # keep the 20 attributes with the largest absolute coefficients
  lasso.res <- dplyr::slice_max(lasso.df, order_by=abs_scores, n=20)
  lasso.table <- as.data.table(lasso.res)
  ### Compare with Ridge
  #### Find lambda via 10-fold cross-validation over a grid of candidate penalties
  tune_results <- tune_ridge(bundled_data$train.X, bundled_data$train.y,
                             num_folds=10, 2^seq(-5,5,1), verbose=F)
  plot(log(tune_results$cv.table$hyp), tune_results$cv.table$means, type="l",
       xlab="log(lambda)", ylab="CV Mean Loss")
  abline(v=log(tune_results$lam.min))  # CV-minimizing lambda, on the same log scale as the x-axis
  print(tune_results$lam.min)          # bare expressions do not auto-print inside a function
  #### Use lam.min for Ridge Regression
  ridge_result <- ridge_betas(bundled_data$train.X, bundled_data$train.y,
                              beta_init=NULL, lam=tune_results$lam.min, method="BFGS")
  ridge.df <- data.frame(att=c("intercept", colnames(bundled_data$train.X)),
                         scores=ridge_result$betas,
                         abs_scores=abs(ridge_result$betas))
  ridge.res <- dplyr::slice_max(ridge.df, order_by=abs_scores, n=20)
  ridge.table <- as.data.table(ridge.res)
  ### Compare with Random Forest
  source("Schrick-Noah_Random-Forest.R")
  rf_result <- rf_comp(bundled_data$train)
  rf.df <- data.frame(att=colnames(bundled_data$train.X),
                      scores=rf_result$rf2_imp$rf_score)
  rf_res <- dplyr::slice_max(rf.df, order_by=scores, n=20)
  rf.table <- as.data.table(rf_res)
  ### Compare with glmnet
  source("Schrick-Noah_glmnet.R")

  #### Alpha = 0 (pure ridge penalty in glmnet)
  glm.res.0 <- glm_fcn(bundled_data$train.X, bundled_data$train.y, 0)
  glm.df.0 <- data.frame(att=c("intercept", colnames(bundled_data$train.X)),
                         scores=glm.res.0$lambda.1se,
                         abs_scores=glm.res.0$abs_scores)
  glm.df.0.res <- dplyr::slice_max(glm.df.0, order_by=abs_scores, n=20)
  glm.0.table <- as.data.table(glm.df.0.res)
  #### Alpha = 1 (pure LASSO penalty in glmnet)
  glm.res.1 <- glm_fcn(bundled_data$train.X, bundled_data$train.y, 1)
  glm.df.1 <- data.frame(att=c("intercept", colnames(bundled_data$train.X)),
                         scores=glm.res.1$lambda.1se,
                         abs_scores=glm.res.1$abs_scores)
  glm.df.1.res <- dplyr::slice_max(glm.df.1, order_by=abs_scores, n=20)
  glm.1.table <- as.data.table(glm.df.1.res)
  ### Plot
  #### Convert attribute names to column indices for the x-axis
  #### ("intercept" has no matching column, so it becomes NA and is skipped by plot/lines)
  lasso.df$att <- match(lasso.df$att, colnames(bundled_data$train))
  ridge.df$att <- match(ridge.df$att, colnames(bundled_data$train))
  rf.df$att <- match(rf.df$att, colnames(bundled_data$train))
  glm.df.0$att <- match(glm.df.0$att, colnames(bundled_data$train))
  glm.df.1$att <- match(glm.df.1$att, colnames(bundled_data$train))

  #### Scale each method's scores so they are comparable on a single plot
  lasso.df$abs_scores <- scale(lasso.df$abs_scores)
  ridge.df$abs_scores <- scale(ridge.df$abs_scores)
  rf.df$scores <- scale(rf.df$scores)
  glm.df.0$abs_scores <- scale(glm.df.0$abs_scores)
  glm.df.1$abs_scores <- scale(glm.df.1$abs_scores)
  plot(x=lasso.df$att, y=lasso.df$abs_scores, type="l", xlab="Vars",
       ylab="Coefficients (Abs Scores)", xaxt="n", col="blue", ylim=c(-1,3),
       main="Scaled scores for simulated data feature selection")
  axis(1, at=seq_along(colnames(bundled_data$train)),
       labels=colnames(bundled_data$train), cex.axis=0.5)
  lines(x=ridge.df$att, y=ridge.df$abs_scores, col="red")
  lines(x=rf.df$att, y=rf.df$scores, col="green")
  lines(x=glm.df.0$att, y=glm.df.0$abs_scores, col="bisque4")
  lines(x=glm.df.1$att, y=glm.df.1$abs_scores, col="purple")
  legend(x="topleft",
         legend=c("LASSO", "Ridge", "Random Forest", "glmnet (alpha=0)", "glmnet (alpha=1)"),
         lty=c(1,1,1,1,1),
         col=c("blue", "red", "green", "bisque4", "purple"),
         cex=1)
}
run_comparison(bundled_data)

## b. Repeat comparison using a graph with clusters
source("Schrick-Noah_graphs.R")
bundled_graph <- sim_graph_data()
bundled_graph_data <- bundled_graph$bundled_graph_data
g1 <- bundled_graph$g1

run_comparison(bundled_graph_data)
## c. Use npdro and igraph to create a knn graph

### Bundled Graph Data
my.k <- 3  # larger k gives fewer clusters
# Transposing train.X computes distances between predictors;
# without the transpose, distances would be between samples.
npdr.nbpairs.idx.g <- npdro::nearestNeighbors(t(bundled_graph_data$train.X),
                                              #nbd.method="multisurf", k=0,
                                              nbd.method="relieff",
                                              nbd.metric="manhattan",
                                              k=my.k)
knn.graph.g <- graph_from_edgelist(as.matrix(npdr.nbpairs.idx.g),
                                   directed=F)
knn.graph.g <- simplify(knn.graph.g)  # drop loops and duplicate edges
### Plot network
plot.igraph(knn.graph.g, layout=layout_with_fr(knn.graph.g),
            vertex.color="red",
            vertex.size=3, vertex.label=NA,
            main="Manhattan, knn-graph from simulated data\nwith erdos-renyi graph structure")

is_isomorphic_to(g1, knn.graph.g)
plot(g1)

clusters <- cluster_louvain(knn.graph.g)
plot(knn.graph.g, mark.groups=clusters)
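# Sketch: inspect the Louvain partition found above
# (membership(), sizes(), and modularity() are standard igraph accessors).
membership(clusters)  # community label for each vertex
sizes(clusters)       # number of vertices per community
modularity(clusters)  # modularity of the Louvain partition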
## d. Add Laplace graph penalty
Lhat <- laplacian_matrix(g1, normalized = TRUE)

### Find resulting beta coeffs
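# Sketch only: one way to fit betas with a graph penalty, assuming a squared-error
# loss (train.y coerced with as.numeric) plus ridge and Laplacian terms,
#   J(b0, beta) = sum((y - b0 - X beta)^2) + lam1*sum(beta^2) + lam2 * t(beta) %*% Lhat %*% beta,
# and assuming g1 has one vertex per predictor so Lhat is p x p. The helper name
# graph_pen_betas and the placeholder penalty values are illustrative, not from the
# assignment's sourced functions.
graph_pen_betas <- function(X, y, L, lam1, lam2){
  X <- as.matrix(X); y <- as.numeric(y); L <- as.matrix(L)
  p <- ncol(X)
  obj <- function(par){
    b0 <- par[1]; beta <- par[-1]
    resid <- y - b0 - as.numeric(X %*% beta)
    sum(resid^2) + lam1*sum(beta^2) + lam2*as.numeric(t(beta) %*% L %*% beta)
  }
  grad <- function(par){
    b0 <- par[1]; beta <- par[-1]
    resid <- y - b0 - as.numeric(X %*% beta)
    c(-2*sum(resid),
      as.numeric(-2*t(X) %*% resid + 2*lam1*beta + 2*lam2*(L %*% beta)))
  }
  fit <- optim(par=rep(0, p + 1), fn=obj, gr=grad, method="BFGS",
               control=list(maxit=500))
  list(b0=fit$par[1], betas=fit$par[-1], value=fit$value)
}
# placeholder penalties; lambda2 is tuned in the next step
lap.fit <- graph_pen_betas(bundled_graph_data$train.X, bundled_graph_data$train.y,
                           Lhat, lam1=0.1, lam2=1)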
### Optimize or choose value for lambda2
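# Sketch only: choose lambda2 by 5-fold CV over a small grid, holding lam1 fixed at
# the placeholder value used above; the grid and the squared-error CV criterion are
# assumptions for illustration, not tuned choices.
lam2.grid <- 2^seq(-4, 4, 1)
set.seed(1)
folds <- sample(rep(1:5, length.out=nrow(bundled_graph_data$train.X)))
cv.mse <- sapply(lam2.grid, function(l2){
  mean(sapply(1:5, function(k){
    tr <- folds != k
    fit <- graph_pen_betas(bundled_graph_data$train.X[tr, , drop=FALSE],
                           as.numeric(bundled_graph_data$train.y)[tr],
                           Lhat, lam1=0.1, lam2=l2)
    pred <- fit$b0 + as.numeric(as.matrix(bundled_graph_data$train.X[!tr, , drop=FALSE]) %*% fit$betas)
    mean((as.numeric(bundled_graph_data$train.y)[!tr] - pred)^2)
  }))
})
plot(log2(lam2.grid), cv.mse, type="b", xlab="log2(lambda2)", ylab="CV MSE")
lam2.best <- lam2.grid[which.min(cv.mse)]
lam2.best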
### Compare to a) and b)
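# Sketch only: rank attributes by |beta| from the graph-penalized fit (refit with
# lam2.best from the step above) so the top-20 list can be set against the
# LASSO / Ridge / Random Forest / glmnet rankings plotted by run_comparison()
# in parts a) and b).
lap.fit <- graph_pen_betas(bundled_graph_data$train.X, bundled_graph_data$train.y,
                           Lhat, lam1=0.1, lam2=lam2.best)
lap.df <- data.frame(att=colnames(bundled_graph_data$train.X),
                     scores=lap.fit$betas,
                     abs_scores=abs(lap.fit$betas))
lap.table <- as.data.table(dplyr::slice_max(lap.df, order_by=abs_scores, n=20))
lap.table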
# 2. Gradient Descent

## Write a gradient-descent routine with a learning-rate parameter
# grad.rosen is the analytic gradient of the Rosenbrock function
# f(x, y) = (a - x)^2 + b*(y - x^2)^2
grad.rosen <- function(xvec, a=2, b=100){
  x <- xvec[1]
  y <- xvec[2]
  f.x <- -2*(a-x) - 4*b*x*(y-x^2)  # df/dx
  f.y <- 2*b*(y-x^2)               # df/dy
  return(c(f.x, f.y))
}
a <- 2
b <- 100
alpha <- .0001  # learning rate
p <- c(0, 0)    # step direction (recomputed each epoch; no momentum term is used below)

xy <- c(-1.8, 3.0)  # initial guess for the solution
# gradient descent
epochs <- 1000000
for (epoch in 1:epochs){
  p <- -grad.rosen(xy, a, b)  # steepest-descent direction
  xy <- xy + alpha*p          # take a small step along it
}
print(xy)  # should be close to the Rosenbrock minimum at (a, a^2) = (2, 4)
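# Sketch only: the momentum update the p initialization above hints at, but which the
# plain loop never uses. gamma (the momentum coefficient) and the epoch count are
# assumed values; momentum roughly multiplies the effective step size by 1/(1 - gamma).
gamma <- 0.9
v <- c(0, 0)          # velocity accumulated across epochs
xy.m <- c(-1.8, 3.0)  # same starting point as above
for (epoch in 1:100000){
  v <- gamma*v - alpha*grad.rosen(xy.m, a, b)  # blend previous velocity with the new gradient
  xy.m <- xy.m + v
}
print(xy.m)  # should also approach (2, 4), in roughly a tenth of the epochs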
# Using optim:
f.rosen <- function(xvec, a=2, b=100){
  #a <- 2; b <- 1000;
  x <- xvec[1]
  y <- xvec[2]
  return((a-x)^2 + b*(y-x^2)^2)
}

sol.BFGS <- optim(par=c(-1.8,3.0), fn=function(x){f.rosen(x,a=2,b=100)},
                  gr=function(x){grad.rosen(x,a=2,b=100)}, method="BFGS")
sol.BFGS$par
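# Sketch: central-difference check that grad.rosen matches the gradient of f.rosen
# (eps and the test point are arbitrary choices for the check).
xy.test <- c(-1.8, 3.0)
eps <- 1e-6
fd <- c((f.rosen(xy.test + c(eps, 0)) - f.rosen(xy.test - c(eps, 0))) / (2*eps),
        (f.rosen(xy.test + c(0, eps)) - f.rosen(xy.test - c(0, eps))) / (2*eps))
cbind(analytic=grad.rosen(xy.test), finite.diff=fd)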