tolusajobi / StrokeOutcome_ML-LogisticRegression_Program


Project R Code Comparing Logistic Regression and ML algorithms #2

Closed tolusajobi closed 4 years ago

tolusajobi commented 4 years ago

##############################################################################################################

FUNCTIONAL IMPAIRMENT RISK PREDICTION: A COMPARISON OF MACHINE LEARNING AND LOGISTIC REGRESSION MODELS

TRAINING AND INTERNAL VALIDATION OF ML AND REGRESSION-BASED MODELS IN PROVE-IT STUDY DATA


##############################################################################################################

INSTALL AND LOAD THE FOLLOWING PACKAGES

library(dplyr)        # for data manipulation
library(caret)        # for model building
library(DMwR)         # for SMOTE implementation
library(purrr)        # for functional programming (map)
library(pROC)         # for AUC calculations
library(ROSE)         # for re-sampling
library(Zelig)        # for rare-event logistic regression
library(randomForest) # for random forest
library(e1071)        # for SVM
library(MASS)
library(rpart)        # for decision tree
library(ada)          # for adaptive boosting
library(partykit)     # for classification and regression trees
library(compositions)
library(naivebayes)   # for naive Bayes
library(psych)
library(MLmetrics)    # for F1 score calculations
library(verification) # for calibration / Brier score
library(epiR)         # for sensitivity and specificity calculations
library(mccr)
library(ggpubr)
library(fitdistrplus)
library(VIM)          # for hot-deck imputation and missing-data plots

DATA UPLOAD

shakiru <- read.csv(file.choose(), header = T)  ## training data

test <- read.csv(file.choose(), header = T)  ## external test data
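file.choose() requires an interactive session, so for a reproducible run the same files could be read from explicit paths; the file names below are hypothetical placeholders, not the actual study files:

# non-interactive alternative; file names are hypothetical placeholders
shakiru <- read.csv("proveit_training.csv", header = TRUE)   # training data
test    <- read.csv("interrsect_test.csv", header = TRUE)    # external test data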

PROVE-IT TRAINING DATA

NS=nrow(shakiru)

nrow(na.omit(shakiru))

shakiru1 <- data.frame(shakiru)
NS2 <- nrow(shakiru1)

shakiru2 <- subset(shakiru1, select = c(mrs02_D90, age, nihss, glucose, sbp, dbp, hypertension, diabetes, treatment, imaging))

INTERRSECT EXTERNAL TEST DATA

NT=nrow(test)

library(VIM)

IMPUTE MISSING DATA

test2<- hotdeck(test)

NT2 <- nrow(test2)
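By default, VIM's hotdeck() appends logical *_imp indicator columns flagging which cells were filled in, so an optional check (not in the original script) of how many values were imputed per variable could be:

# count imputed cells per variable using VIM's *_imp indicator columns
colSums(test2[grep("_imp$", names(test2))])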

str(shakiru2)


#################################################################

DATA PRE-PROCESSING AND CREATION OF TRAINING AND TEST DATASETS

###############################################################

shakiru2$age <- as.numeric(shakiru2$age)
shakiru2$sbp <- as.numeric(shakiru2$sbp)
shakiru2$dbp <- as.numeric(shakiru2$dbp)
shakiru2$glucose <- as.numeric(shakiru2$glucose)
shakiru2$nihss <- as.numeric(shakiru2$nihss)
shakiru2$mrs02_D90 <- as.factor(shakiru2$mrs02_D90)
shakiru2$hypertension <- as.factor(shakiru2$hypertension)
shakiru2$diabetes <- as.factor(shakiru2$diabetes)
shakiru2$treatment <- as.factor(shakiru2$treatment)
shakiru2$imaging <- as.factor(shakiru2$imaging)
# note: the conversions below assume these additional covariates were also retained in shakiru2
shakiru2$hxhrtdis <- as.factor(shakiru2$hxhrtdis)
shakiru2$hxchf <- as.factor(shakiru2$hxchf)
shakiru2$hxafib <- as.factor(shakiru2$hxafib)
shakiru2$smoking <- as.factor(shakiru2$smoking)
shakiru2$sex <- as.factor(shakiru2$sex)
str(shakiru2)
summary(shakiru2)

preproc <- preProcess(shakiru2[1:10], method = c("center", "scale"))
norm <- predict(preproc, shakiru2[1:10])
summary(norm)
str(norm)

mice_plot <- aggr(norm, col = c('navyblue', 'yellow'), numbers = F, sortVars = TRUE,
                  labels = names(norm), cex.axis = .8, gap = 1,
                  ylab = c("Missing data", "Pattern"))

set.seed(123)
ind <- sample(2, nrow(norm), replace = TRUE, prob = c(0.6, 0.4))
train <- norm[ind == 1, ]
dim(train)
test <- norm[ind == 2, ]   # note: this overwrites the external test data loaded above
dim(test)
str(test)
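As an optional sanity check (not in the original script), the 60/40 split can be inspected to confirm the outcome distribution is similar in both partitions:

# outcome balance in each partition
prop.table(table(train$mrs02_D90))
prop.table(table(test$mrs02_D90))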

============Custom Control Parameters=================

custom<- trainControl(method = "repeatedcv", number = 10, repeats = 5, verboseIter = T)
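The models below are tuned on accuracy, caret's default metric. If ROC-based tuning were preferred, trainControl can return class probabilities; the sketch below is not part of the original analysis and assumes the outcome levels are first renamed to valid R names (caret requires this when classProbs = TRUE), so it is applied to a copy of the training data:

# sketch only: resampling control that reports ROC, sensitivity and specificity
train_roc <- train
levels(train_roc$mrs02_D90) <- make.names(levels(train_roc$mrs02_D90))  # e.g. "0"/"1" -> "X0"/"X1"
custom_roc <- trainControl(method = "repeatedcv", number = 10, repeats = 5,
                           classProbs = TRUE, summaryFunction = twoClassSummary,
                           verboseIter = TRUE)
# then pass metric = "ROC", trControl = custom_roc, and the train_roc data to train()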

###########################################

RANDOM FOREST CLASSIFICATION

############################################
set.seed(1234)
mtry <- 1000
tunegrid <- expand.grid(.mtry = mtry)
# note: tunegrid is defined but never passed to train(), so caret's default mtry grid is used
lm <- train(mrs02_D90 ~ ., train, method = "rf",
            trControl = custom, na.action = na.exclude)

======Prediction and Confusion Matrix in TRAIN data =========

train <- na.omit(train)
preds <- predict(lm, train)
confusionMatrix(preds, train$mrs02_D90)
Precision(preds, train$mrs02_D90)
F1_Score(preds, train$mrs02_D90)
auc.rf <- auc(train$mrs02_D90, as.numeric(as.character(preds)))
auc.rf
varImp(lm, scale = TRUE)

============Prediction and Confusion Matrix in TEST data========

library(klaR)
test <- na.omit(test)
str(test)
preds.rft <- predict(lm, test)
conf_matrix.rf <- table(preds.rft, test$mrs02_D90)
sensitivity(conf_matrix.rf)
epi.tests(conf_matrix.rf)
Precision(preds.rft, test$mrs02_D90)
mccr(preds.rft, test$mrs02_D90)
auc.rf <- auc(test$mrs02_D90, as.numeric(as.character(preds.rft)))
auc.rf.ci <- ci.auc(test$mrs02_D90, as.numeric(as.character(preds.rft)))
auc.rf
auc.rf.ci

test$mrs02_D90 <- as.numeric(as.character(test$mrs02_D90))
preds.rft <- as.numeric(as.character(preds.rft))
is.numeric(test$mrs02_D90)
is.numeric(preds.rft)
aa <- round(test$mrs02_D90, preds.rft)
a <- verify(test$mrs02_D90, preds.rft)
summary(a)
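The AUCs above are computed from hard 0/1 class labels. As a sketch (not in the original script), discrimination could also be assessed from the random forest's predicted probabilities; this assumes the positive class is the factor level named "1":

# sketch only: AUC from predicted probabilities rather than class labels
probs.rf <- predict(lm, test, type = "prob")[, "1"]   # column name assumed to be the level "1"
roc.rf <- roc(test$mrs02_D90, probs.rf)
auc(roc.rf)
ci.auc(roc.rf)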

ESTIMATION OF BRIER SCORE AND 95%CI

brier_score <- function(preds, obs) {
  mean((obs - preds)^2)
}
N <- 1000
get_boot_est_preds <- function(preds, obs, metric) {
  idx <- sample(length(preds), replace = TRUE)
  metric(preds[idx], obs[idx])
}
reps_pred <- replicate(N, get_boot_est_preds(preds.rft, test$mrs02_D90, brier_score))

get_boot_est_mod <- function(test, metric) {
  metric(preds.rft, test$mrs02_D90)
}
reps_model <- replicate(N, get_boot_est_mod(test, brier_score))
res <- rbind(data.frame(brier_score = reps_pred, approach = 'predictions'),
             data.frame(brier_score = reps_model, approach = 'refit_model'))
ggplot(res, aes(brier_score, color = approach)) + geom_density() + theme_bw()
calc_ci_95 <- function(v) {
  q <- format(quantile(v, probs = c(0.025, 0.975)), digits = 5)
  paste0('(', q[1], ' to ', q[2], ')')
}
cat('CI using bootstrapped estimates from predictions only:', calc_ci_95(reps_pred), '\n')
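For reporting, the bootstrap interval is usually quoted alongside the point estimate on the full test set; using the functions already defined above:

# point estimate of the Brier score with its bootstrap 95% CI
brier_score(preds.rft, test$mrs02_D90)
calc_ci_95(reps_pred)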

##########################################

ADAPTIVE BOOSTING

###########################################
set.seed(1234)
lm1 <- train(mrs02_D90 ~ ., train, method = "ada",
             trControl = custom, na.action = na.exclude)
summary(lm1)
plot(lm1)

======Prediction and Confusion Matrix in train data =========

preds <- predict(lm1, train)
confusionMatrix(preds, train$mrs02_D90)
Precision(preds, train$mrs02_D90)
F1_Score(preds, train$mrs02_D90)
auc.ad <- auc(train$mrs02_D90, as.numeric(as.character(preds)))
auc.ad
varImp(lm1, scale = TRUE)

============Prediction and Confusion Matrix in test data========

library(klaR)
preds.adt <- predict(lm1, test)
conf_matrix.ad <- table(preds.adt, test$mrs02_D90)
sensitivity(conf_matrix.ad)
epi.tests(conf_matrix.ad)
Precision(preds.adt, test$mrs02_D90)
mccr(preds.adt, test$mrs02_D90)
auc.ad <- auc(test$mrs02_D90, as.numeric(as.character(preds.adt)))
auc.ad.ci <- ci.auc(test$mrs02_D90, as.numeric(as.character(preds.adt)))
auc.ad
auc.ad.ci
test$mrs02_D90 <- as.numeric(as.character(test$mrs02_D90))
preds.adt <- as.numeric(as.character(preds.adt))
is.numeric(test$mrs02_D90)
is.numeric(preds.adt)
bb <- round(test$mrs02_D90, preds.adt)
b <- verify(test$mrs02_D90, preds.adt)
summary(b)

Brier Score CI

reps_pred <- replicate(N, get_boot_est_preds(preds.adt, test$mrs02_D90, brier_score))

get_boot_est_mod <- function(test, metric) {
  metric(preds.adt, test$mrs02_D90)
}
reps_model <- replicate(N, get_boot_est_mod(test, brier_score))
res <- rbind(data.frame(brier_score = reps_pred, approach = 'predictions'),
             data.frame(brier_score = reps_model, approach = 'refit_model'))
ggplot(res, aes(brier_score, color = approach)) + geom_density() + theme_bw()
calc_ci_95 <- function(v) {
  q <- format(quantile(v, probs = c(0.025, 0.975)), digits = 5)
  paste0('(', q[1], ' to ', q[2], ')')
}
cat('CI using bootstrapped estimates from predictions only:', calc_ci_95(reps_pred), '\n')

##############################

Logistic Regression

###############################
set.seed(1234)
lm2 <- train(mrs02_D90 ~ ., train, method = "glm", trControl = custom,
             family = "binomial", na.action = na.exclude)
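Because method = "glm" stores an ordinary glm fit, the logistic coefficients can also be read out as odds ratios; a minimal sketch, not in the original script (profile-likelihood CIs via confint can be slow on larger data):

# odds ratios with 95% CIs from the underlying glm fit (sketch only)
summary(lm2$finalModel)
exp(cbind(OR = coef(lm2$finalModel), confint(lm2$finalModel)))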

======Prediction and Confusion Matrix in train data =========

library(klaR)
preds <- predict(lm2, train)
confusionMatrix(preds, train$mrs02_D90)
Precision(preds, train$mrs02_D90)
F1_Score(preds, train$mrs02_D90)
auc.lr <- auc(train$mrs02_D90, as.numeric(as.character(preds)))
varImp(lm2, scale = TRUE)

============Prediction and Confusion Matrix in test data========

library(klaR)
preds.lrt <- predict(lm2, test)
conf_matrix.lr <- table(preds.lrt, test$mrs02_D90)
sensitivity(conf_matrix.lr)
epi.tests(conf_matrix.lr)
Precision(preds.lrt, test$mrs02_D90)
mccr(preds.lrt, test$mrs02_D90)
auc.lr <- auc(test$mrs02_D90, as.numeric(as.character(preds.lrt)))
auc.lr.ci <- ci.auc(test$mrs02_D90, as.numeric(as.character(preds.lrt)))
auc.lr
auc.lr.ci

test$mrs02_D90 <- as.numeric(as.character(test$mrs02_D90))
preds.lrt <- as.numeric(as.character(preds.lrt))
is.numeric(test$mrs02_D90)
is.numeric(preds.lrt)
cc <- round(test$mrs02_D90, preds.lrt)
c <- verify(test$mrs02_D90, preds.lrt)
summary(c)

Estimation of Brier Score & 95% CI

reps_pred <- replicate(N, get_boot_est_preds(preds.lrt, test$mrs02_D90, brier_score))

get_boot_est_mod <- function(test, metric) {
  metric(preds.lrt, test$mrs02_D90)
}
reps_model <- replicate(N, get_boot_est_mod(test, brier_score))
res <- rbind(data.frame(brier_score = reps_pred, approach = 'predictions'),
             data.frame(brier_score = reps_model, approach = 'refit_model'))
ggplot(res, aes(brier_score, color = approach)) + geom_density() + theme_bw()
calc_ci_95 <- function(v) {
  q <- format(quantile(v, probs = c(0.025, 0.975)), digits = 5)
  paste0('(', q[1], ' to ', q[2], ')')
}
cat('CI using bootstrapped estimates from predictions only:', calc_ci_95(reps_pred), '\n')

#####################################################

CLASSIFICATION AND REGRESSION TREE

#####################################################
set.seed(1234)
lm3 <- train(mrs02_D90 ~ ., train, method = "rpart",
             trControl = custom, na.action = na.exclude)

======Prediction and Confusion Matrix in train data =========

library(klaR)
preds <- predict(lm3, train)
confusionMatrix(preds, train$mrs02_D90)
Precision(preds, train$mrs02_D90)
F1_Score(preds, train$mrs02_D90)
auc.dt <- auc(train$mrs02_D90, as.numeric(as.character(preds)))
varImp(lm3, scale = TRUE)

============Prediction and Confusion Matrix in test data========

preds.dtt <- predict(lm3, test)
conf_matrix.dt <- table(preds.dtt, test$mrs02_D90)
sensitivity(conf_matrix.dt)
epi.tests(conf_matrix.dt)
Precision(preds.dtt, test$mrs02_D90)
mccr(preds.dtt, test$mrs02_D90)
auc.dt <- auc(test$mrs02_D90, as.numeric(as.character(preds.dtt)))
auc.dt.ci <- ci.auc(test$mrs02_D90, as.numeric(as.character(preds.dtt)))
auc.dt
auc.dt.ci
test$mrs02_D90 <- as.numeric(as.character(test$mrs02_D90))
preds.dtt <- as.numeric(as.character(preds.dtt))
is.numeric(test$mrs02_D90)
is.numeric(preds.dtt)
dd <- round(test$mrs02_D90, preds.dtt)
d <- verify(test$mrs02_D90, preds.dtt)

summary(d)

Estimation of Brier Score and 95%CI

reps_pred <- replicate(N, get_boot_est_preds(preds.dtt, test$mrs02_D90, brier_score))

get_boot_est_mod <- function(test, metric) {
  metric(preds.dtt, test$mrs02_D90)
}
reps_model <- replicate(N, get_boot_est_mod(test, brier_score))
res <- rbind(data.frame(brier_score = reps_pred, approach = 'predictions'),
             data.frame(brier_score = reps_model, approach = 'refit_model'))
ggplot(res, aes(brier_score, color = approach)) + geom_density() + theme_bw()
calc_ci_95 <- function(v) {
  q <- format(quantile(v, probs = c(0.025, 0.975)), digits = 5)
  paste0('(', q[1], ' to ', q[2], ')')
}
cat('CI using bootstrapped estimates from predictions only:', calc_ci_95(reps_pred), '\n')

########################################

C5.0 DECISION TREE

########################################
set.seed(1234)
lm4 <- train(mrs02_D90 ~ ., train, method = "C5.0",
             trControl = custom, na.action = na.exclude)

======Prediction and Confusion Matrix in train data =========

train <- na.omit(train)
preds <- predict(lm4, train)
confusionMatrix(preds, train$mrs02_D90)
Precision(preds, train$mrs02_D90)
F1_Score(preds, train$mrs02_D90)
auc.cart <- auc(train$mrs02_D90, as.numeric(as.character(preds)))
varImp(lm4, scale = TRUE)

============Prediction and Confusion Matrix in test data========

library(klaR)
test <- na.omit(test)
preds.cat <- predict(lm4, test)
conf_matrix.cart <- table(preds.cat, test$mrs02_D90)
sensitivity(conf_matrix.cart)
epi.tests(conf_matrix.cart)
test$mrs02_D90 <- as.factor(test$mrs02_D90)
levels(test$mrs02_D90) <- levels(preds)
Precision(preds.cat, test$mrs02_D90)
mccr(preds.cat, test$mrs02_D90)
auc.c50 <- auc(test$mrs02_D90, as.numeric(as.character(preds.cat)))
auc.c50.ci <- ci.auc(test$mrs02_D90, as.numeric(as.character(preds.cat)))
auc.c50
auc.c50.ci
test$mrs02_D90 <- as.numeric(as.character(test$mrs02_D90))
preds.cat <- as.numeric(as.character(preds.cat))
is.numeric(test$mrs02_D90)
is.numeric(preds.cat)
ee <- round(test$mrs02_D90, preds.cat)
e <- verify(test$mrs02_D90, preds.cat)
summary(e)

Estimated Brier Score & 95%CI

reps_pred <- replicate(N, get_boot_est_preds(preds.cat, test$mrs02_D90, brier_score))

get_boot_est_mod <- function(test, metric) {
  metric(preds.cat, test$mrs02_D90)
}
reps_model <- replicate(N, get_boot_est_mod(test, brier_score))
res <- rbind(data.frame(brier_score = reps_pred, approach = 'predictions'),
             data.frame(brier_score = reps_model, approach = 'refit_model'))
ggplot(res, aes(brier_score, color = approach)) + geom_density() + theme_bw()
calc_ci_95 <- function(v) {
  q <- format(quantile(v, probs = c(0.025, 0.975)), digits = 5)
  paste0('(', q[1], ' to ', q[2], ')')
}
cat('CI using bootstrapped estimates from predictions only:', calc_ci_95(reps_pred), '\n')

###############################################

SUPPORT VECTOR MACHINE

###############################################
lm5 <- train(mrs02_D90 ~ ., train, method = "svmRadial",
             trControl = custom, na.action = na.exclude)

======Prediction and Confusion Matrix in train data =========

preds <- predict(lm5, train)
train$mrs02_D90 <- as.factor(train$mrs02_D90)
levels(train$mrs02_D90) <- levels(preds)
confusionMatrix(preds, train$mrs02_D90)
Precision(preds, train$mrs02_D90)
F1_Score(preds, train$mrs02_D90)
auc.svm <- auc(train$mrs02_D90, as.numeric(as.character(preds)))
varImp(lm5, scale = TRUE)

============Prediction and Confusion Matrix in test data========

test <- na.omit(test)
preds.svm <- predict(lm5, test)
conf_matrix.svm <- table(preds.svm, test$mrs02_D90)
epi.tests(conf_matrix.svm)
Precision(preds.svm, test$mrs02_D90)
F1_Score(preds.svm, test$mrs02_D90)
mccr(preds.svm, test$mrs02_D90)
auc.svm <- auc(test$mrs02_D90, as.numeric(as.character(preds.svm)))
auc.svm.ci <- ci.auc(test$mrs02_D90, as.numeric(as.character(preds.svm)))
auc.svm
auc.svm.ci
test$mrs02_D90 <- as.numeric(as.character(test$mrs02_D90))
preds.svm <- as.numeric(as.character(preds.svm))
is.numeric(test$mrs02_D90)
is.numeric(preds.svm)
ff <- round(test$mrs02_D90, preds.svm)
f <- verify(test$mrs02_D90, preds.svm)
summary(f)

Brier Score CI

reps_pred <- replicate(N, get_boot_est_preds(preds.svm, test$mrs02_D90, brier_score))

get_boot_est_mod <- function(test, metric) {
  metric(preds.svm, test$mrs02_D90)
}
reps_model <- replicate(N, get_boot_est_mod(test, brier_score))
res <- rbind(data.frame(brier_score = reps_pred, approach = 'predictions'),
             data.frame(brier_score = reps_model, approach = 'refit_model'))
ggplot(res, aes(brier_score, color = approach)) + geom_density() + theme_bw()
calc_ci_95 <- function(v) {
  q <- format(quantile(v, probs = c(0.025, 0.975)), digits = 5)
  paste0('(', q[1], ' to ', q[2], ')')
}
cat('CI using bootstrapped estimates from predictions only:', calc_ci_95(reps_pred), '\n')

##################################################

LASSO LOGISTIC REGRESSION

##################################################

===========LASSO Logistic Regression==============

set.seed(1234)
lasso <- train(mrs02_D90 ~ ., train, family = "binomial", method = "glmnet",
               tuneGrid = expand.grid(alpha = 1, lambda = seq(0.0001, 1, length = 5)),
               trControl = custom, na.action = na.exclude)
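Not part of the original script, but the tuned penalty and the LASSO coefficients retained at that penalty can be inspected with the standard caret/glmnet accessors:

# selected lambda and the coefficients kept at that penalty (sketch only)
lasso$bestTune
coef(lasso$finalModel, s = lasso$bestTune$lambda)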

=================Prediction and Confusion matrix in train===============

library(klaR)
p1 <- predict(lasso, train)
confusionMatrix(p1, train$mrs02_D90)
Precision(p1, train$mrs02_D90)
F1_Score(p1, train$mrs02_D90)
auc.las <- auc(train$mrs02_D90, as.numeric(as.character(p1)))
auc.las
varImp(lasso, scale = TRUE)

==============Prediction and confusion matrix in test==========

p2 <- predict(lasso, test)
conf_matrix <- table(p2, test$mrs02_D90)
sensitivity(conf_matrix)
epi.tests(conf_matrix)
Precision(p2, test$mrs02_D90)
F1_Score(p2, test$mrs02_D90)
mccr(p2, test$mrs02_D90)
auc.las <- auc(test$mrs02_D90, as.numeric(as.character(p2)))
auc.las.ci <- ci.auc(test$mrs02_D90, as.numeric(as.character(p2)))
auc.las
auc.las.ci
test$mrs02_D90 <- as.numeric(as.character(test$mrs02_D90))
p2 <- as.numeric(as.character(p2))
is.numeric(test$mrs02_D90)
is.numeric(p2)
gg <- round(test$mrs02_D90, p2)
g <- verify(test$mrs02_D90, p2)
summary(g)

######## Brier Score CI ########

reps_pred <- replicate(N, get_boot_est_preds(p2, test$mrs02_D90, brier_score))

get_boot_est_mod <- function(test, metric) {
  metric(p2, test$mrs02_D90)
}
reps_model <- replicate(N, get_boot_est_mod(test, brier_score))
res <- rbind(data.frame(brier_score = reps_pred, approach = 'predictions'),
             data.frame(brier_score = reps_model, approach = 'refit_model'))
ggplot(res, aes(brier_score, color = approach)) + geom_density() + theme_bw()
calc_ci_95 <- function(v) {
  q <- format(quantile(v, probs = c(0.025, 0.975)), digits = 5)
  paste0('(', q[1], ' to ', q[2], ')')
}
cat('CI using bootstrapped estimates from predictions only:', calc_ci_95(reps_pred), '\n')

tolusajobi commented 4 years ago

Done