graph laplacian penalty for lasso
commit a1e661a078
parent 2b7a52da12
@@ -16,7 +16,7 @@ bundled_data <- create_data()

run_comparison <- function(bundled_data){
### LASSO
unpen_beta <- lasso_coeff(bundled_data$train.X, bundled_data$train.y)
unpen_beta <- lasso_coeff(bundled_data$train.X, bundled_data$train.y, lambda=0)
lasso.df <- data.frame(att=c("intercept", colnames(bundled_data$train.X)),
scores=unpen_beta$betas,
abs_scores=abs(unpen_beta$betas))
@@ -95,16 +95,19 @@ run_comparison <- function(bundled_data){
lty=c(1,1,1,1,1),
col=c("blue", "red", "green", "bisque4", "purple"),
cex=1)

return(lasso.df)
}

run_comparison(bundled_data)
lasso_df.a <- run_comparison(bundled_data)

## b. Repeat comparison using a graph with clusters
source("Schrick-Noah_graphs.R")
bundled_graph <- sim_graph_data()
bundled_graph_data <- bundled_graph$bundled_graph_data
g1 <- bundled_graph$g1

run_comparison(bundled_graph_data)
lasso_df.b <- run_comparison(bundled_graph_data)
## c. Use npdro and igraph to create knn
### Bundled Graph Data
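The knn-graph construction itself is elided from this diff; only the plotting context line (plot(knn.graph.g, mark.groups=clusters)) survives in the next hunk header. As a point of reference, a minimal sketch of a k-nearest-neighbour graph built with base R and igraph alone is shown below; knn_adjacency, k = 5, and the Euclidean metric are illustrative assumptions, not the npdro call the script actually uses.

# Editor's sketch, not part of the commit: a knn graph from a data matrix whose
# rows are the objects to connect, using base R distances plus igraph.
knn_adjacency <- function(X, k = 5) {
  D <- as.matrix(dist(X))                  # pairwise Euclidean distances
  A <- matrix(0, nrow(D), ncol(D))
  for (i in seq_len(nrow(D))) {
    nn <- order(D[i, ])[2:(k + 1)]         # k nearest neighbours, skipping self
    A[i, nn] <- 1
  }
  A <- pmax(A, t(A))                       # symmetrise so the graph is undirected
  igraph::graph_from_adjacency_matrix(A, mode = "undirected")
}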
@@ -136,12 +139,40 @@ plot(knn.graph.g, mark.groups=clusters)

## d. Add Laplace graph penalty
Lhat <- laplacian_matrix(g1, normalized = TRUE)
### Find lambda
tune_graph_results <- tune_ridge(bundled_graph_data$train.X, bundled_graph_data$train.y,
num_folds=10, 2^seq(-5,5,1), verbose=F)
plot(log(tune_graph_results$cv.table$hyp), tune_graph_results$cv.table$means, type="l",
xlab="lambda", ylab="CV Mean Loss")
abline(v=tune_graph_results$lam.min)
tune_graph_results$lam.min

lap.penalty <- tune_graph_results$lam.min*Lhat
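# [Editor's illustration, not from the commit] The quantity this penalty adds to
# the loss is t(beta) %*% (lambda2 * Lhat) %*% beta, which is small when
# coefficients on connected vertices agree; demo_g and demo_beta below are
# assumed toy values standing in for g1 and the fitted betas.
demo_g    <- igraph::make_ring(5)
demo_L    <- as.matrix(igraph::laplacian_matrix(demo_g, normalized = TRUE))
demo_beta <- c(1, 1, 1, -1, -1)                         # smooth except across two edges
as.numeric(t(demo_beta) %*% demo_L %*% demo_beta)       # the graph-smoothness penalty for demo_beta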

### Find resulting beta coeffs

### Optimize or choose value for lambda2
unpen_beta_lap <- lasso_coeff(bundled_graph_data$train.X, bundled_graph_data$train.y,
lap_penalty=lap.penalty)
lasso.df.lap <- data.frame(att=c("intercept", colnames(bundled_graph_data$train.X)),
scores=unpen_beta_lap$betas,
abs_scores=abs(unpen_beta_lap$betas))
lasso.res.lap <- dplyr::slice_max(lasso.df.lap, order_by=abs_scores, n=20)
lasso.table.lap <- as.data.table(lasso.res.lap)

### Compare to a) and b)
lasso.df.lap$att <- match(lasso.df.lap$att, colnames(bundled_graph_data$train.X))
lasso.df.lap$abs_scores <- scale(lasso.df.lap$abs_scores)
plot(x=lasso.df.lap$att, y=lasso.df.lap$abs_scores, type="l", xlab="Vars",
ylab="Coefficients (Abs Scores)", xaxt="n", col="blue", ylim=c(-1,3),
main="Scaled scores for simulated-data feature selection\nacross LASSO approaches")
lines(x=lasso_df.a$att, y=lasso_df.a$abs_scores, col="red")
lines(x=lasso_df.b$att, y=lasso_df.b$abs_scores, col="green")
legend(x="topleft",
legend=c("Base", "Erdos-Renyi Graph Structure", "Graph Laplacian Penalty"),
lty=c(1,1,1),
col=c("blue", "red", "green"),
cex=1)


# 2. Gradient Descent
## Write fn with learning param
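The body of the part-2 routine is not included in this diff, so the following is only a sketch of the general shape a gradient-descent function with an explicit learning-rate parameter might take; grad_fn, lr, max_iter, and tol are assumed names rather than ones taken from the repository.

# Hypothetical sketch, not from the commit: plain gradient descent with a
# learning-rate (step size) parameter.
gradient_descent <- function(grad_fn, beta_init, lr = 0.01, max_iter = 1000, tol = 1e-6) {
  beta <- beta_init
  for (iter in seq_len(max_iter)) {
    step <- lr * grad_fn(beta)
    beta <- beta - step
    if (sqrt(sum(step^2)) < tol) break     # stop once updates become negligible
  }
  beta
}
gradient_descent(function(b) 2 * b, beta_init = c(3, -2), lr = 0.1)   # toy quadratic: converges toward 0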

@@ -16,7 +16,7 @@ penalized_loss <- function(X, y, beta, lam, alpha=0){
penal.loss <- sum(-yclass*log(yhat) - (1-yclass)*log(1-yhat))/m +
# 2. penalty, lam=0 removes penalty
lam*((1-alpha)*sum(beta*beta)/2 + # ridge
alpha*sum(abs(beta))) # lasso
return(penal.loss)
}
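As a quick sanity check on the penalty term in the hunk above (an editor's illustration, not part of the commit), the elastic-net piece can be evaluated on its own; elastic_penalty and the example numbers are assumptions made for the demonstration.

# Illustrative only: the penalty added to the mean log-loss, in isolation.
elastic_penalty <- function(beta, lam, alpha = 0) {
  lam * ((1 - alpha) * sum(beta * beta) / 2 +   # ridge part (alpha = 0)
           alpha * sum(abs(beta)))              # lasso part (alpha = 1)
}
elastic_penalty(c(0.5, -2, 0), lam = 0.1, alpha = 1)   # pure lasso: 0.1 * 2.5 = 0.25
elastic_penalty(c(0.5, -2, 0), lam = 0.1, alpha = 0)   # pure ridge: 0.1 * 4.25 / 2 = 0.2125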

@@ -58,12 +58,16 @@ lasso_betas <- function(X,y,beta_init=NULL){
}

# Adjust betas
lasso_coeff <- function(X, y, lambda=0.03125, tol=1e-2){
lasso_coeff <- function(X, y, lambda=0.03125, tol=1e-2, lap_penalty=0){
unpen_beta <- lasso_betas(X, y, beta_init=numeric(101))
old_loss <- unpen_beta$loss
lasso_converged <- FALSE
loop_count <- 0
while (!lasso_converged){
if(all(lap_penalty==0)) {GL_penalty <- 0} else{
GL_penalty <- lap_penalty %*% as.matrix(unpen_beta$betas[1:100])   # per-coefficient adjustment: (lambda2 * L) %*% beta
}

beta_LS <- optim(unpen_beta$betas, # guess
fn=function(beta){penalized_loss(X, y, beta, lam=0, alpha=1)}, # objective
gr=function(beta){ridge_grad(X, y, beta, lam=0)}, # gradient
@@ -79,6 +83,7 @@ lasso_coeff <- function(X, y, lambda=0.03125, tol=1e-2){
else{
beta_LS$par[i] <- beta_LS$par[i]+lambda
}
if(!all(GL_penalty==0)){beta_LS$par[i] <- beta_LS$par[i] - GL_penalty[i]}
}
unpen_beta <- lasso_betas(X,y,beta_init=beta_LS$par)
lasso_converged <- abs(unpen_beta$loss - old_loss) < tol
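For reference (editor's note, not part of the commit): the per-coefficient correction subtracted in the loop above corresponds to the gradient of the quadratic graph penalty, since the derivative of t(beta) %*% L %*% beta with respect to beta is 2 * L %*% beta, and the constant 2 can be folded into the tuned lambda. A two-coefficient toy case with assumed values:

# Toy illustration: the Laplacian of a single edge pulls the two connected
# coefficients toward each other.
L_toy    <- matrix(c( 1, -1,
                     -1,  1), nrow = 2, byrow = TRUE)   # Laplacian of one edge
beta_toy <- c(0.8, 0.2)
as.numeric(L_toy %*% beta_toy)   # c(0.6, -0.6): beta[1] is pushed down, beta[2] up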