# .Rhistory
library(tidyverse)   # dplyr/ggplot2/forcats for wrangling and plotting
library(caret)       # unified train()/predict() interface for the models below
library(lubridate)   # ymd() date parsing
# Utility functions
# Collapse a high-cardinality factor to its 5 most frequent levels; everything
# else, including NAs, is folded into a single "Other" level
collapse.factor <- function(x) {
x <- fct_lump_n(x, n = 5)                    # keep top 5 levels, lump the rest
x <- fct_explicit_na(x, na_level = "Other")  # NAs join the "Other" level
return(x)
}
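# Illustrative check of collapse.factor(), not from the original session: with
# level counts 7,6,5,4,3,2,1 the five most frequent levels a-e survive, while
# "f", "g" and the NA entry all fold into the shared "Other" level
table(collapse.factor(factor(c(rep(letters[1:7], times = 7:1), NA))))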
# Plot the percentage of missing values for every column that has any,
# ordered from least to most missing
get.missing.perc <- function(df) {
na.perc <- colMeans(is.na(df)) * 100
data.frame(perc = na.perc[na.perc > 0], name = names(na.perc[na.perc > 0])) %>%
arrange(perc) %>%
mutate(name = factor(name, levels = name)) %>%
ggplot(aes(y = name, x = perc, fill = perc < 50)) +
geom_col() +   # identical to geom_bar(stat = "identity")
labs(title = "Percentage of missing values", y = "Variable", x = "Percentage")
}
# Import the train and test datasets, treating empty strings as NA
train.data <- read.csv("Train.csv", na.strings = c("", "NA"))
test.data <- read.csv("Test.csv", na.strings = c("", "NA"))
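# Optional missingness check (not in the original history): visualise how much
# of each column is NA before relying on the NA handling further down
get.missing.perc(train.data)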
# Getting the Unique customer IDs from both Train and Test data
custIds.train <- unique(train.data$custId)
custIds.test <- unique(test.data$custId)
# Check that no customer appears in both the Test and Train data
sum(custIds.test %in% custIds.train) # 0, so the two sets are disjoint
# Getting the max and min dates seen in the Train dataset
maxdate <- max(ymd(train.data$date))
mindate <- min(ymd(train.data$date))
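# The Train window spans maxdate - mindate days; the recency features built in
# the aggregation below are measured against these two endpoints
maxdate - mindate   # length of the observation window as a difftime in days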
# Combine Train and Test so the factor collapsing and NA handling are applied
# consistently, then aggregate the session-level rows to one row per custId
full.data <- train.data %>% dplyr::select(-revenue) %>%
rbind(test.data) %>%
mutate(date = ymd(date),
across(.cols = where(is.character), .fns = collapse.factor)) %>%
group_by(custId) %>%
summarise(channelGrouping = max(ifelse(is.na(channelGrouping), -9999, channelGrouping)),
first_ses_from_the_period_start = min(date) - mindate,
last_ses_from_the_period_end = maxdate - max(date),
interval_dates = max(date) - min(date),
unique_date_num = length(unique(date)),
maxVisitNum = max(visitNumber, na.rm = TRUE),
browser = as.factor(first(ifelse(is.na(browser), -9999, browser))),
operatingSystem = as.factor(first(ifelse(is.na(operatingSystem), -9999, operatingSystem))),
deviceCategory = as.factor(first(ifelse(is.na(deviceCategory), -9999, deviceCategory))),
subContinent = as.factor(first(ifelse(is.na(subContinent), -9999, subContinent))),
country = as.factor(first(ifelse(is.na(country), -9999, country))),
region = as.factor(first(ifelse(is.na(region), -9999, region))),
networkDomain = as.factor(first(ifelse(is.na(networkDomain), -9999, networkDomain))),
source = as.factor(first(ifelse(is.na(source), -9999, source))),
medium = as.factor(first(ifelse(is.na(medium), -9999, medium))),
isVideoAd_mean = mean(ifelse(is.na(adwordsClickInfo.isVideoAd), 0, 1)),
isMobile = mean(ifelse(isMobile == TRUE, 1, 0)),
isTrueDirect = mean(ifelse(is.na(isTrueDirect), 0, 1)),
bounce_sessions = sum(ifelse(is.na(bounces), 0, 1)),
pageviews_sum = sum(pageviews, na.rm = TRUE),
pageviews_mean = mean(ifelse(is.na(pageviews), 0, pageviews)),
pageviews_min = min(ifelse(is.na(pageviews), 0, pageviews)),
pageviews_max = max(ifelse(is.na(pageviews), 0, pageviews)),
pageviews_median = median(ifelse(is.na(pageviews), 0, pageviews)),
session_cnt = n()) %>%
mutate(pctBounce = bounce_sessions / session_cnt,
allBounce = as.numeric(bounce_sessions >= session_cnt),
bounceViewRatio = bounce_sessions / (pageviews_sum + 1))
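# Quick structural checks, not from the original session: the aggregation
# should yield exactly one row per customer, and pctBounce is a proportion
stopifnot(nrow(full.data) == length(unique(full.data$custId)))
stopifnot(all(full.data$pctBounce >= 0 & full.data$pctBounce <= 1))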
# Split full.data back into train and test; the target is per-customer
# log(revenue + 1)
train.data <- train.data %>%
group_by(custId) %>%
summarise(revenue = sum(revenue)) %>%
mutate(revenue = log(revenue + 1)) %>%
inner_join(full.data %>% filter(custId %in% custIds.train), by = "custId")
test.data <- full.data %>% filter(custId %in% custIds.test)
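# Sanity check (illustrative): the two filters above should partition
# full.data exactly, since no custId appears in both Train and Test
stopifnot(nrow(train.data) + nrow(test.data) == nrow(full.data))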
rm(full.data)
# Modelling
# Step 1: classify whether a customer will spend anything at all, using an
# LDA classifier evaluated with k-fold cross-validation (caret's default is 10)
fitControl <- trainControl(method = "cv")
fit.lda <- train(as.factor(revenue > 0) ~ channelGrouping +
allBounce + region +
browser + operatingSystem + country +
bounce_sessions + pageviews_sum,
data = train.data, trControl = fitControl,
method = "lda")
fit.lda
# In-sample confusion table: actual (rows) vs predicted (columns)
table(train.data$revenue > 0, predict(fit.lda, train.data))
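# In-sample accuracy from the table above (illustrative, not from the original
# session; the cross-validated accuracy printed by fit.lda is the less
# optimistic number to trust)
mean(as.character(predict(fit.lda, train.data)) == as.character(train.data$revenue > 0))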
# Add the fitted P(revenue > 0) posterior to the training data; it is used as
# a predictor in the MARS model below
train.data$posterior <- predict(fit.lda, train.data, type = "prob")[, "TRUE"]
# Step 2: fit a MARS model (earth) for log revenue, tuning the number of
# retained terms (nprune) and the interaction degree by cross-validation
fitControl <- trainControl(method = "cv")
tuneGrid <- expand.grid(nprune = seq(20, 30, by = 1), degree = 1:3)
fit.mars <- train(revenue ~ bounceViewRatio + channelGrouping + posterior +
allBounce + as.numeric(first_ses_from_the_period_start) +
as.numeric(last_ses_from_the_period_end) + pctBounce + region +
source + browser + operatingSystem + country + networkDomain +
log(bounce_sessions + 1) + bounce_sessions + pageviews_sum +
log(pageviews_sum + 1) + log(pageviews_mean + 1) + pageviews_max +
pageviews_median + log(session_cnt),
data = train.data, trControl = fitControl, tuneGrid = tuneGrid,
method = "earth")
fit.mars
plot(fit.mars)   # CV RMSE across the nprune/degree grid
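# Optional (not from the original session): inspect which predictors the
# pruned MARS model actually retained, via caret's importance generic
varImp(fit.mars)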
# Finally, prepare the test data
# Get the LDA posterior for the test customers
test.data$posterior <- predict(fit.lda, test.data, type = "prob")[, "TRUE"]
# Predict log revenue with MARS; revenue cannot be negative, so floor at 0
preds.mars <- predict(fit.mars, newdata = test.data)
preds.mars[preds.mars < 0] <- 0
tibble(custId = test.data$custId, predRevenue = preds.mars[, "y"]) %>%
write.csv("Submission.csv", row.names = FALSE)