penalized_loss <- function(X, y, beta, lam, alpha=0){
  # y needs to be 0/1
  # beta: regression coefficients (intercept first)
  # lam: penalty strength, lam = 0 gives un-penalized logistic regression
  # alpha = 0 ridge penalty, alpha = 1 lasso penalty
  m <- nrow(X)
  Xtilde <- as.matrix(cbind(intercept = rep(1, m), X))
  cnames <- colnames(Xtilde)
  z <- Xtilde %*% beta            # linear predictor (column vector)
  yhat <- 1/(1 + exp(-z))         # predicted probabilities
  yclass <- as.numeric(y)
  # 1. mean logistic (cross-entropy) loss
  penal.loss <- sum(-yclass*log(yhat) - (1 - yclass)*log(1 - yhat))/m +
    # 2. penalty, lam = 0 removes it; note the intercept beta[1] is penalized too
    lam*((1 - alpha)*sum(beta*beta)/2 +   # ridge
           alpha*sum(abs(beta)))          # lasso
  return(penal.loss)
}

ridge_grad <- function(X, y, beta, lam){
  # gradient of the ridge-penalized logistic loss
  # y needs to be 0/1
  # also works for un-penalized logistic regression if lam = 0
  m <- nrow(X)
  p <- ncol(X)
  Xtilde <- as.matrix(cbind(intercept = rep(1, m), X))
  cnames <- colnames(Xtilde)
  z <- Xtilde %*% beta            # linear predictor (column vector)
  yhat <- 1/(1 + exp(-z))
  yclass <- as.numeric(y)
  grad <- rep(0, p + 1)
  for (a in seq(1, p + 1)){
    beta_a <- beta[a]  # input beta from previous descent step
    # mean negative log-likelihood gradient: sum((yhat - y) * x_a) / m
    Loss.grad <- sum(-yclass*(1 - yhat)*Xtilde[, a] + (1 - yclass)*yhat*Xtilde[, a])/m
    # ridge penalty gradient (matches the loss above); lam = 0 removes it
    grad[a] <- Loss.grad + lam*beta_a
  } # end for loop
  return(grad)
}

### gradient-based optimization of the betas via optim()
ridge_betas <- function(X, y, beta_init=NULL, lam, alpha=0, method="BFGS"){
  if (is.null(beta_init)){ beta_init <- rep(.1, ncol(X) + 1) }
  # method: BFGS, CG, Nelder-Mead
  # the analytic gradient is only valid for the ridge penalty (alpha = 0);
  # for alpha > 0 let optim() fall back to a numerical gradient
  grad_fn <- if (alpha == 0) function(beta){ ridge_grad(X, y, beta, lam) } else NULL
  fit <- optim(beta_init,                                                             # initial guess
               fn = function(beta){ penalized_loss(X, y, beta, lam, alpha = alpha) }, # objective
               gr = grad_fn,                                                          # gradient
               method = method)  # , control = list(trace = 2)
  return(list(loss = fit$value, betas = fit$par))
}

# Un-penalized fit (lam = 0); its coefficients are soft-thresholded below
# to obtain LASSO-style regression coefficients
lasso_betas <- function(X, y){
  ridge_betas(X, y, beta_init = NULL, lam = 0, alpha = 0, method = "BFGS")
}

# Adjust betas: soft-threshold the un-penalized coefficients by lambda
unpen_coeff <- function(X, y, lambda=0){
  betas <- lasso_betas(X, y)$betas
  for (j in seq_along(betas)){
    if (abs(betas[j]) <= lambda){
      betas[j] <- 0
    } else if (betas[j] > lambda){
      betas[j] <- betas[j] - lambda
    } else {
      betas[j] <- betas[j] + lambda
    }
  }
  return(betas)
}
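
### Example usage: a minimal sketch on simulated data.
### The seed, sample size, true coefficients, and lambda values below are
### illustrative assumptions, not part of the original code.
set.seed(1)
n <- 200; p <- 5
X <- matrix(rnorm(n * p), n, p)
true_beta <- c(-0.5, 2, -1.5, 0, 0, 0)   # intercept + p coefficients (assumed)
prob <- as.vector(1/(1 + exp(-cbind(1, X) %*% true_beta)))
y <- rbinom(n, 1, prob)

# ridge-penalized logistic regression
ridge_fit <- ridge_betas(X, y, lam = 0.1, alpha = 0)
ridge_fit$betas

# un-penalized fit followed by soft-thresholding (lasso-style coefficients)
unpen_coeff(X, y, lambda = 0.1)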