penalized_loss <- function(X, y, beta, lam, alpha = 0){
  # y: response, coded 0/1
  # beta: regression coefficients (intercept first)
  # lam: penalty strength; lam = 0 gives unpenalized logistic regression
  # alpha = 0: ridge penalty, alpha = 1: lasso penalty
  m <- nrow(X)
  Xtilde <- as.matrix(cbind(intercept = rep(1, m), X))
  z <- Xtilde %*% beta           # linear predictor (column vector)
  yhat <- 1 / (1 + exp(-z))      # predicted probabilities
  yclass <- as.numeric(y)
  # 1. average logistic (cross-entropy) loss, plus
  # 2. the penalty term; lam = 0 removes the penalty
  #    (note that the intercept beta[1] is included in the penalty here)
  penal.loss <- sum(-yclass * log(yhat) - (1 - yclass) * log(1 - yhat)) / m +
    lam * ((1 - alpha) * sum(beta * beta) / 2 +   # ridge
             alpha * sum(abs(beta)))              # lasso
  return(penal.loss)
}

ridge_grad <- function(X, y, beta, lam){
  # Gradient of the ridge-penalized logistic loss.
  # y: response, coded 0/1
  # Also works for unpenalized logistic regression when lam = 0.
  m <- nrow(X)
  p <- ncol(X)
  Xtilde <- as.matrix(cbind(intercept = rep(1, m), X))
  z <- Xtilde %*% beta           # linear predictor (column vector)
  yhat <- 1 / (1 + exp(-z))
  yclass <- as.numeric(y)
  grad <- rep(0, p + 1)
  for (a in seq(1, p + 1)){
    beta_a <- beta[a]            # coefficient from the previous descent step
    # derivative of the average log-loss wrt beta_a: mean((yhat - y) * x_a)
    Loss.grad <- sum(-yclass * (1 - yhat) * Xtilde[, a] +
                       (1 - yclass) * yhat * Xtilde[, a]) / m
    # the ridge term lam * beta_a is NOT divided by m, to match penalized_loss
    grad[a] <- Loss.grad + lam * beta_a
  } # end for loop
  return(grad)
}

### numerical optimization of the betas via optim()
ridge_betas <- function(X, y, beta_init = NULL, lam, alpha = 0, method = "BFGS"){
  # method: "BFGS", "CG", or "Nelder-Mead"
  if (is.null(beta_init)){ beta_init <- rep(0.1, ncol(X) + 1) }
  # the analytic gradient ridge_grad() assumes the ridge penalty, so keep alpha = 0
  fit <- optim(beta_init,                                                   # starting values
               fn = function(beta){ penalized_loss(X, y, beta, lam, alpha = alpha) },  # objective
               gr = function(beta){ ridge_grad(X, y, beta, lam) },          # gradient
               method = method)  # , control = list(trace = 2)
  return(list(loss = fit$value, betas = fit$par))
}

# Regression coefficients for the LASSO: fit the unpenalized model (lam = 0);
# the coefficients are then soft-thresholded by unpen_coeff() below.
lasso_betas <- function(X, y){
  ridge_betas(X, y, beta_init = NULL, lam = 0, alpha = 0, method = "BFGS")
}

# Adjust (soft-threshold) the unpenalized betas by lambda
unpen_coeff <- function(X, y, lambda = 0){
  unpen_beta <- lasso_betas(X, y)
  for (i in seq_along(unpen_beta$betas)){
    b <- unpen_beta$betas[i]
    if (abs(b) <= lambda){
      unpen_beta$betas[i] <- 0           # small coefficients are zeroed out
    } else if (b > lambda){
      unpen_beta$betas[i] <- b - lambda  # shrink positive coefficients toward zero
    } else {
      unpen_beta$betas[i] <- b + lambda  # shrink negative coefficients toward zero
    }
  }
  return(unpen_beta)
}

if (!require("caret")) install.packages("caret")
library(caret)

tune_ridge <- function(X, y, num_folds, tune_grid, verbose = TRUE){
  # K-fold cross-validation over the grid of penalties in tune_grid
  folds <- caret::createFolds(y, k = num_folds)
  cv.results <- list()
  for (fold.id in seq(1, num_folds)){
    te.idx <- folds[[fold.id]]
    if (verbose){ cat("fold", fold.id, "of", num_folds, "\n") }
    if (verbose){ cat("\t inner loop over hyperparameters...\n") }
    # iterate over the hyperparameter grid
    scores <- sapply(tune_grid, function(lam){
      # fit the betas on the training folds
      btrain <- ridge_betas(X[-te.idx, ], y[-te.idx], beta_init = NULL,
                            lam = lam, method = "BFGS")$betas
      # score the held-out fold with the unpenalized log-loss (lam = 0),
      # so that different penalties are compared on the same scale
      penalized_loss(X[te.idx, ], y[te.idx], btrain, lam = 0, alpha = 0)
    }) # end sapply loop over hyperparameters
    cv.results[[fold.id]] <- scores       # one test loss per value of lam
  } # end for folds loop
  cv.results <- data.frame(cv.results)    # turn the list into a data frame
  cv.results$means <- rowMeans(as.matrix(cv.results))
  cv.results$hyp <- tune_grid
  colnames(cv.results) <- c(names(folds), "means", "hyp")
  #### Select the best-performing penalty
  best.idx <- which.min(cv.results$means) # minimum mean CV loss
  return(list(cv.table = cv.results, lam.min = cv.results$hyp[best.idx]))
}
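
## Example usage: a minimal sketch on simulated data. The simulated data set,
## the penalty grid, the number of folds, and the soft-threshold value below
## are illustrative assumptions, not part of the original functions.
set.seed(1)
n <- 200; p <- 5
X <- matrix(rnorm(n * p), nrow = n, ncol = p)
true_beta <- c(-0.5, 1.5, -2, 0, 0, 1)               # intercept + p coefficients
prob <- 1 / (1 + exp(-cbind(1, X) %*% true_beta))
y <- rbinom(n, size = 1, prob = prob)                 # 0/1 response

# cross-validate the ridge penalty over a small grid
cv <- tune_ridge(X, y, num_folds = 5, tune_grid = c(0, 0.01, 0.1, 1), verbose = TRUE)
cv$cv.table          # per-fold test losses, their means, and the penalty grid
cv$lam.min           # penalty with the smallest mean CV loss

# refit the ridge model on all the data with the selected penalty
ridge_fit <- ridge_betas(X, y, lam = cv$lam.min)
round(ridge_fit$betas, 3)

# lasso-style coefficients: unpenalized fit followed by soft-thresholding
lasso_fit <- unpen_coeff(X, y, lambda = 0.1)
round(lasso_fit$betas, 3)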