Machine Learning in Practice: Kaggle Titanic, 0.79904

I. Data Preprocessing

1) Load the dataset and imports

import pandas as pd
import numpy as np
import random as rnd
import re
# visualization
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
# Read the train and test data
train=pd.read_csv('/home/kesci/input/temp7976/train.csv')
test=pd.read_csv('/home/kesci/input/temp7976/test.csv')

2) A look at the fields in train

PassengerId => passenger ID
Pclass => ticket class (1st/2nd/3rd)
Name => passenger name
Sex => sex
Age => age
SibSp => number of siblings/spouses aboard
Parch => number of parents/children aboard
Ticket => ticket number
Fare => fare paid
Cabin => cabin number
Embarked => port of embarkation


train

Analysis: Age has some missing values, which we will fill in. Cabin has many missing values, so we reduce it to a 0/1 "has cabin" indicator.

3) Inspect the format of train

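A standard pandas way to inspect the same information (a minimal sketch, assuming the train DataFrame loaded above):

# Column dtypes and non-null counts; Age, Cabin and Embarked contain missing values
train.info()
# Missing-value count per column
print(train.isnull().sum())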

II. Feature Engineering

1. Encode Sex, family size, Cabin, and Embarked (one-hot for Embarked, binary flags for the rest)

# Fill the two missing Embarked values before creating dummies,
# so those rows are not encoded as all zeros
train['Embarked'] = train['Embarked'].fillna('S')
test['Embarked'] = test['Embarked'].fillna('S')
train = pd.concat([train, pd.get_dummies(train.Embarked)], axis=1)
test = pd.concat([test, pd.get_dummies(test.Embarked)], axis=1)
full_data = [train, test]
for dataset in full_data:
    # Family size = siblings/spouses + parents/children + the passenger
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
    # A missing Cabin is NaN (a float), so this flags whether a cabin was recorded
    dataset['Has_Cabin'] = dataset['Cabin'].apply(lambda x: 0 if type(x) == float else 1)
    dataset['Sex'] = dataset['Sex'].map({'female': 0, 'male': 1})
    
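To peek at the new columns (a small sketch; C, Q and S are the dummy columns produced by get_dummies on Embarked):

print(train[['Sex', 'FamilySize', 'Has_Cabin', 'C', 'Q', 'S']].head())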

2. Handle missing Age values

Compute the mean and standard deviation of the non-missing ages, then fill the gaps with draws from the corresponding normal distribution:

for dataset in full_data:
    u = dataset.Age.mean(skipna=True)
    std = dataset.Age.std(skipna=True)
    # Draw one normal sample per missing age; abs/round keep the values sane.
    # Masking on isnull avoids clobbering real ages of 0 (infants under one year).
    mask = dataset.Age.isnull()
    dataset.loc[mask, 'Age'] = np.abs(np.round(np.random.normal(u, std, mask.sum())))
    dataset['Age'] = dataset['Age'].astype(int)
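
A quick sanity check that no ages are left missing (a sketch, not part of the original pipeline):

for dataset in full_data:
    assert dataset.Age.notnull().all()
print(full_data[0].Age.describe())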

3. Bin Fare and Age into intervals, and drop unneeded features

for dataset in full_data:
    # Age bins
    dataset.loc[ dataset['Age'] <= 16, 'Age']    = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[ dataset['Age'] > 64, 'Age']                           = 4
    # Fare bins; fill the one missing test fare with the train median first
    dataset['Fare'] = dataset['Fare'].fillna(train['Fare'].median())
    dataset.loc[ dataset['Fare'] <= 7.91, 'Fare']      = 0
    dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
    dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare']   = 2
    dataset.loc[ dataset['Fare'] > 31, 'Fare']      = 3
    dataset['Fare'] = dataset['Fare'].astype(int)
full_data[0] = full_data[0].drop(['SibSp','Parch','Ticket','Cabin','PassengerId','Embarked'],axis=1)
full_data[1] = full_data[1].drop(['SibSp','Parch','Ticket','Cabin','PassengerId','Embarked'],axis=1)
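
After binning, Age and Fare each take only a handful of integer levels; a quick look (sketch):

print(full_data[0].Age.value_counts().sort_index())
print(full_data[0].Fare.value_counts().sort_index())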

4. Process the names (extract titles)

def get_title(name):
    # A title is the word ending in a period after the surname, e.g. " Mr."
    title_search = re.search(r' ([A-Za-z]+)\.', name)
    # If the title exists, extract and return it.
    if title_search:
        return title_search.group(1)
    return ""

for dataset in full_data:
    dataset['Title'] = dataset['Name'].apply(get_title)
for dataset in full_data:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col',\
    'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')

    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
for dataset in full_data:
    dataset['Title'] = dataset['Title'].fillna("Rare")
    dataset['Age*Class'] = dataset.Age * dataset.Pclass
full_data[0]=pd.concat([full_data[0],pd.get_dummies(full_data[0].Title)], axis=1)
full_data[1]=pd.concat([full_data[1],pd.get_dummies(full_data[1].Title)], axis=1)
full_data[0] = full_data[0].drop(['Name','Title'], axis=1)
full_data[1] = full_data[1].drop(['Name','Title'], axis=1)
train=full_data[0]
test=full_data[1]
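
After the replacements, the remaining title categories should be Master, Miss, Mr, Mrs and Rare; a quick count (a sketch, assuming all five appear in the data):

print(train[['Master', 'Miss', 'Mr', 'Mrs', 'Rare']].sum())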

OK! The features are done. Next, let's look at the correlation heatmap.

#heatmap
colormap = plt.cm.RdBu
plt.figure(figsize=(14,12))
plt.title('Pearson Correlation of Features', y=1.05, size=15)
sns.heatmap(train.astype(float).corr(),linewidths=0.1,vmax=1.0, 
            square=True, cmap=colormap, linecolor='white', annot=True)

Does each feature have a concrete relationship with survival? The grid below shows age histograms split by Pclass and Sex:

grid = sns.FacetGrid(full_data[0], row='Pclass', col='Sex', height=2.2, aspect=1.6)  # 'size' was renamed to 'height' in newer seaborn
grid.map(plt.hist,'Age', alpha=.5, bins=20)
grid.add_legend()

III. Prediction

1. First-level predictions

1) Combining the base models

# Going to use these 5 base models for the stacking
from sklearn.ensemble import (RandomForestClassifier, AdaBoostClassifier,
                              GradientBoostingClassifier, ExtraTreesClassifier)
from sklearn.svm import SVC
from sklearn.model_selection import KFold  # sklearn.cross_validation was removed in modern scikit-learn



# Some useful parameters which will come in handy later on
ntrain = train.shape[0]
ntest = test.shape[0]
SEED = 0  # for reproducibility
NFOLDS = 5  # set folds for out-of-fold prediction
kf = KFold(n_splits=NFOLDS, shuffle=True, random_state=SEED)  # shuffle=True is required for random_state to take effect


# Class to extend the Sklearn classifier
class SklearnHelper(object):
    def __init__(self, clf, seed=0, params=None):
        params['random_state'] = seed
        self.clf = clf(**params)

    def train(self, x_train, y_train):
        self.clf.fit(x_train, y_train)

    def predict(self, x):
        return self.clf.predict(x)

    def fit(self, x, y):
        return self.clf.fit(x, y)

    def feature_importances(self, x, y):
        # Fit once and reuse the fitted model instead of fitting twice
        importances = self.clf.fit(x, y).feature_importances_
        print(importances)
        return importances


def get_oof(clf, x_train, y_train, x_test):
    # Out-of-fold predictions: train on k-1 folds, predict the held-out fold,
    # then average the per-fold test-set predictions into one column.
    oof_train = np.zeros((ntrain,))
    oof_test = np.zeros((ntest,))
    oof_test_skf = np.empty((NFOLDS, ntest))

    for i, (train_index, test_index) in enumerate(kf.split(x_train)):
        x_tr = x_train[train_index]
        y_tr = y_train[train_index]
        x_te = x_train[test_index]

        clf.train(x_tr, y_tr)

        oof_train[test_index] = clf.predict(x_te)
        oof_test_skf[i, :] = clf.predict(x_test)
    #print(oof_train)
    oof_test[:] = oof_test_skf.mean(axis=0)
    return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)



# Put in our parameters for said classifiers
# Random Forest parameters
rf_params = {
    'n_jobs': -1,
    'n_estimators': 500,
     'warm_start': True,
     #'max_features': 0.2,
    'max_depth': 6,
    'min_samples_leaf': 2,
    'max_features' : 'sqrt',
    'verbose': 0
}

# Extra Trees Parameters
et_params = {
    'n_jobs': -1,
    'n_estimators':500,
    #'max_features': 0.5,
    'max_depth': 8,
    'min_samples_leaf': 2,
    'verbose': 0
}

# AdaBoost parameters
ada_params = {
    'n_estimators': 500,
    'learning_rate' : 0.75
}

# Gradient Boosting parameters
gb_params = {
    'n_estimators': 500,
     #'max_features': 0.2,
    'max_depth': 5,
    'min_samples_leaf': 2,
    'verbose': 0
}

# Support Vector Classifier parameters
svc_params = {
    'kernel' : 'linear',
    'C' : 0.025
    }



# Create 5 objects, one for each of our 5 base models
rf = SklearnHelper(clf=RandomForestClassifier, seed=SEED, params=rf_params)
et = SklearnHelper(clf=ExtraTreesClassifier, seed=SEED, params=et_params)
ada = SklearnHelper(clf=AdaBoostClassifier, seed=SEED, params=ada_params)
gb = SklearnHelper(clf=GradientBoostingClassifier, seed=SEED, params=gb_params)
svc = SklearnHelper(clf=SVC, seed=SEED, params=svc_params)

Prepare the train and test arrays

# Create Numpy arrays of train, test and target ( Survived) dataframes to feed into our models
y_train = train['Survived'].ravel()
train = train.drop(['Survived'], axis=1)
x_train = train.values # Creates an array of the train data
x_test = test.values # Creates an array of the test data

Aggregating feature importances across the models

#importances for each model
rf_feature = rf.feature_importances(x_train,y_train)
et_feature = et.feature_importances(x_train, y_train)
ada_feature = ada.feature_importances(x_train, y_train)
gb_feature = gb.feature_importances(x_train,y_train)


cols = train.columns.values
# Create a dataframe with features
feature_dataframe = pd.DataFrame( {'features': cols,
     'Random Forest feature importances': rf_feature,
     'Extra Trees  feature importances': et_feature,
      'AdaBoost feature importances': ada_feature,
    'Gradient Boost feature importances': gb_feature
    })
feature_dataframe['mean'] = feature_dataframe.mean(axis=1, numeric_only=True)  # row-wise mean over the importance columns
feature_dataframe = feature_dataframe.sort_values(by='mean', ascending=False)
feature_dataframe.plot(x='features',y='mean',kind='bar')

2. Second-level prediction with XGBoost


# Create our OOF train and test predictions. These base results will be used as new features
et_oof_train, et_oof_test = get_oof(et, x_train, y_train, x_test) # Extra Trees
rf_oof_train, rf_oof_test = get_oof(rf,x_train, y_train, x_test) # Random Forest
ada_oof_train, ada_oof_test = get_oof(ada, x_train, y_train, x_test) # AdaBoost 
gb_oof_train, gb_oof_test = get_oof(gb,x_train, y_train, x_test) # Gradient Boost
svc_oof_train, svc_oof_test = get_oof(svc,x_train, y_train, x_test) # Support Vector Classifier

# Predict with the second-level model:
# each base model's out-of-fold predictions, stacked column-wise, become xgboost's x_train,
# and the average of its per-fold test-set predictions becomes xgboost's x_test.
# In short, the meta-model is fit on the base models' predictions.
x_train = np.concatenate(( et_oof_train, rf_oof_train, ada_oof_train, gb_oof_train, svc_oof_train), axis=1)
x_test = np.concatenate(( et_oof_test, rf_oof_test, ada_oof_test, gb_oof_test, svc_oof_test), axis=1)


import xgboost as xgb
gbm = xgb.XGBClassifier(
    #learning_rate = 0.02,
 n_estimators= 2000,
 max_depth= 4,
 min_child_weight= 2,
 #gamma=1,
 gamma=0.9,                        
 subsample=0.8,
 colsample_bytree=0.8,
 objective= 'binary:logistic',
 nthread= -1,
 scale_pos_weight=1).fit(x_train, y_train)
predictions = gbm.predict(x_test)
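
As a smoke test of the stacked model (a sketch; accuracy on its own training features is optimistic, so don't read too much into it):

print('train accuracy:', gbm.score(x_train, y_train))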

Write the submission file

# Test PassengerIds run from 892 to 1309 (418 rows)
df = pd.DataFrame({'PassengerId': list(range(892, 1310)),
                   'Survived': predictions})
df.to_csv('over.csv', index=False)
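
A slightly more robust variant (a sketch, assuming the test.csv path from the top of the post) reads the ids back instead of hard-coding the range:

ids = pd.read_csv('/home/kesci/input/temp7976/test.csv')['PassengerId']
pd.DataFrame({'PassengerId': ids, 'Survived': predictions}).to_csv('over.csv', index=False)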


References

https://www.kaggle.com/arthurtok/introduction-to-ensembling-stacking-in-python
