-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathSteps for a Proper Analysis.txt
More file actions
162 lines (129 loc) · 5.29 KB
/
Steps for a Proper Analysis.txt
File metadata and controls
162 lines (129 loc) · 5.29 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
# import necessary libraries and dataset and define the data precisely
#1 summary statistics for the numeric columns
df.describe()   # fixed: methods are lowercase — df.Describe() raises AttributeError
#2 dtypes, non-null counts and memory usage
df.info()       # fixed: was df.Info()
#3 check unique values in dataset
df.apply(lambda x: len(x.unique()))   # df.nunique() is the equivalent one-liner
#4 check the null values and fill or remove null values
#5 collect the categorical (object-dtype) column names
cat_col = [col for col in df.dtypes.index if df.dtypes[col] == 'object']
cat_col
#6 remove unwanted columns from the categorical list
cat_col.remove('sr no.')
cat_col
#7 print the value distribution of each categorical column
for col in cat_col:
    print(col)
    print(df[col].value_counts())
    print()
#8 normalise inconsistent category labels to one canonical spelling
#   fixed: the original dict was missing the opening quote on 'Regular'
df['Item_fat_content'] = df['Item_fat_content'].replace(
    {'LF': 'Low Fat', 'reg': 'Regular', 'low fat': 'Low Fat'})
df['Item_fat_content'].value_counts()
# another example: map identifier prefixes to readable labels
df['New_Item_Type'] = df['New_Item_Type'].map(
    {'FD': 'Food', 'NC': 'Non-Consumable', 'DR': 'Drinks'})
df['New_Item_Type'].value_counts()
#9 derive outlet age (dataset collected in 2013) — smaller values for modelling
df['Outlet_Years'] = 2013 - df['Outlet_Establishment_Year']
#10 Exploratory Data Analysis
# distribution plot — repeat for every numeric variable
# NOTE(review): distplot is deprecated since seaborn 0.11 and removed in 0.14;
# prefer sns.histplot(df['Item_weight'], kde=True) on current seaborn
sns.distplot(df['Item_weight'])
# countplots — for categorical variables
# NOTE(review): positional-Series calls are deprecated in seaborn >= 0.12;
# prefer sns.countplot(data=df, x="Item_fat_content")
sns.countplot(df["Item_fat_content"])
# correlation heatmap over numeric columns only
# fixed: the original assigned `number_df` but used `numeric_df` (NameError)
numeric_df = df.select_dtypes(include='number')
corr = numeric_df.corr()
sns.heatmap(corr, annot=True, cmap="coolwarm")
#11 Label Encoding — map each category to an integer code
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
df['Outlet'] = le.fit_transform(df['Outlet_Identifier'])
cat_col = ['Item_Fat_Content', 'Item_Type', 'Outlet_Size',
           'Outlet_Location_Type', 'Outlet_Type', 'New_Item_Type']
for col in cat_col:
    df[col] = le.fit_transform(df[col])
# Onehot Encoding — one 0/1 column per category
# NOTE(review): these columns were already label-encoded above; in a real
# pipeline you would choose one of the two encodings, not apply both
df = pd.get_dummies(df, columns=['Item_Fat_Content', 'Outlet_Size',
                                 'Outlet_Location_Type', 'Outlet_Type',
                                 'New_Item_Type'])
df.head()
# input/output split: drop identifiers and the target from the features
X = df.drop(columns=['Outlet_Establishment_Year', 'Item_Identifier',
                     'Outlet_Identifier', 'Item_Outlet_Sales'])
y = df['Item_Outlet_Sales']
# or, when the target is the last column:
X = df.iloc[:, :-1]   # fixed: original `df.iloc[:.:-1]` is a syntax error
y = df["result_variables"]
#12 Model Training
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error


def train(model, X, y):
    """Fit `model` on (X, y), then print training MSE and the 5-fold CV MSE.

    cross_val_score returns *negative* MSE for this scoring string, so the
    mean is negated (via abs) before printing.
    """
    model.fit(X, y)
    pred = model.predict(X)
    cv_score = cross_val_score(model, X, y,
                               scoring='neg_mean_squared_error', cv=5)
    cv_score = np.abs(np.mean(cv_score))
    print("Model Report")
    print("MSE:", mean_squared_error(y, pred))
    print("CV Score:", cv_score)


from sklearn.linear_model import LinearRegression, Ridge, Lasso

# fixed: `normalize=True` was deprecated in sklearn 1.0 and removed in 1.2
# (TypeError on current versions); scale features explicitly instead, e.g.
# make_pipeline(StandardScaler(), LinearRegression())
model = LinearRegression()
train(model, X, y)
coef = pd.Series(model.coef_, X.columns).sort_values()
coef.plot(kind='bar', title="Model Coefficients")

model = Ridge()
train(model, X, y)
coef = pd.Series(model.coef_, X.columns).sort_values()
coef.plot(kind='bar', title="Model Coefficients")

model = Lasso()
train(model, X, y)
coef = pd.Series(model.coef_, X.columns).sort_values()
coef.plot(kind='bar', title="Model Coefficients")

# apply different models like decision tree, ensemble, random forest, k-nearest etc.
# other methods we need
$1 renamng columns
df = df.rename(columns={'weathersit':'weather',
'yr':'year',
'mnth':'month',
'hr':'hour',
'hum':'humidity',
'cnt':'count'})
df.head()
$2 # change int columns to category
cols = ['season','month','hour','holiday','weekday','workingday','weather']
for col in cols:
df[col] = df[col].astype('category')
df.info()
$3 Point plots
# mean 'casual' rides per hour, one line per weekday (band = confidence interval)
fig, ax = plt.subplots(figsize=(20,10))
sns.pointplot(data=df, x='hour', y='casual', hue='weekday', ax=ax)
ax.set(title='Count of bikes during weekdays and weekends: Unregistered users')
$4 barplots
# mean 'count' per month with confidence intervals
fig, ax = plt.subplots(figsize=(20,10))
sns.barplot(data=df, x='month', y='count', ax=ax)
ax.set(title='Count of bikes during different months')
$5 using multiple model at once
from sklearn.linear_model import LinearRegression, Ridge, HuberRegressor, ElasticNetCV
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor
models = [LinearRegression(),
Ridge(),
HuberRegressor(),
ElasticNetCV(),
DecisionTreeRegressor(),
RandomForestRegressor(),
ExtraTreesRegressor(),
GradientBoostingRegressor()]
from sklearn import model_selection
def train(model):
kfold = model_selection.KFold(n_splits=5, random_state=42)
pred = model_selection.cross_val_score(model, X, y, cv=kfold, scoring='neg_mean_squared_error')
cv_score = pred.mean()
print('Model:',model)
print('CV score:', abs(cv_score))
for model in models:
train(model)
$6 Train test split
from sklearn.model_selection import train_test_split
# hold out 25% of the rows for evaluation; random_state makes the split reproducible
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
$ 7 Make confusion matrices if the model is classification
$ 8 check if the data is imbalanced, fix it using imblearn library