
Study Note (ML8_Cross validation)

KloudHyun 2023. 9. 23. 16:00

📌KFold

import numpy as np
from sklearn.model_selection import KFold

X = np.array([[1,2], [3,4], [1,2], [3,4]])
y = np.array([1,2,3,4])
kf = KFold(n_splits=2)  # split the data into 2 folds

print(kf.get_n_splits(X))
kf
>>>
2
KFold(n_splits=2, random_state=None, shuffle=False)
for train_idx, test_idx in kf.split(X):
    print('---- idx')
    print(train_idx, test_idx)
    print('---- train data')
    print(X[train_idx])  
    print('---- validation data')
    print(X[test_idx])  
>>>
---- idx
[2 3] [0 1]
---- train data
[[1 2]
 [3 4]]
---- validation data
[[1 2]
 [3 4]]
---- idx
[0 1] [2 3]
---- train data
[[1 2]
 [3 4]]
---- validation data
[[1 2]
 [3 4]]
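The splits above are contiguous blocks because shuffle=False is the default, as the printed KFold(...) repr shows. A minimal sketch of the same toy data with shuffling turned on (not in the original note; the exact indices depend on random_state):

import numpy as np
from sklearn.model_selection import KFold

X = np.array([[1,2], [3,4], [1,2], [3,4]])

# shuffle=True mixes the rows before splitting; random_state makes it reproducible
kf_shuffled = KFold(n_splits=2, shuffle=True, random_state=13)

for train_idx, test_idx in kf_shuffled.split(X):
    print(train_idx, test_idx)  # order is randomized; the exact split depends on random_state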

📌Let's try cross validation!

import pandas as pd

red = pd.read_csv('../data/winequality-red.csv', sep=';')
white = pd.read_csv('../data/winequality-white.csv', sep=';')
red['color'] = 1.    # label red wine as 1
white['color'] = 0.  # label white wine as 0

wine = pd.concat([red, white])

# binary taste label: 1 if quality is above 5, else 0
wine['taste'] = [1. if grade > 5 else 0. for grade in wine['quality']]

X = wine.drop(['taste', 'quality'], axis=1)
y = wine['taste']

from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=13)

wine_tree = DecisionTreeClassifier(max_depth=2, random_state=13)
wine_tree.fit(X_train, y_train)

y_pred_tr = wine_tree.predict(X_train)
y_pred_test = wine_tree.predict(X_test)

print('Train Acc : ', accuracy_score(y_train, y_pred_tr)) 
print('Test Acc : ', accuracy_score(y_test, y_pred_test))
>>>
Train Acc :  0.7294593034442948
Test Acc :  0.7161538461538461
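
A caveat worth keeping in mind (a sketch, not from the original note): the Test Acc above is a single number tied to one particular split, and it moves if the data is re-split. That is the motivation for cross validation:

from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

# the holdout score shifts with the split; seeds 13/14/15 are arbitrary choices
for seed in [13, 14, 15]:
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=seed)
    tree = DecisionTreeClassifier(max_depth=2, random_state=13).fit(X_tr, y_tr)
    print(seed, accuracy_score(y_te, tree.predict(X_te)))
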
from sklearn.model_selection import KFold
kfold = KFold(n_splits=5)
wine_tree_cv = DecisionTreeClassifier(max_depth=2, random_state=13)

🔻 Train on the data!

cv_accuracy = []

# fit and score the same tree on each of the 5 folds
for train_idx, test_idx in kfold.split(X):
    X_train = X.iloc[train_idx]
    X_test = X.iloc[test_idx]
    y_train = y.iloc[train_idx]
    y_test = y.iloc[test_idx]

    wine_tree_cv.fit(X_train, y_train)
    pred = wine_tree_cv.predict(X_test)
    cv_accuracy.append(accuracy_score(y_test, pred))

cv_accuracy
>>>
[0.6007692307692307,
 0.6884615384615385,
 0.7090069284064665,
 0.7628945342571208,
 0.7867590454195535]
np.mean(cv_accuracy)
>>>
0.709578255462782
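
The five fold scores range from 0.60 to 0.79, most likely because the red and white rows are still stacked in order and KFold takes contiguous chunks by default, so each fold sees a very different mix of wines. A sketch of the same loop with shuffling (the numbers will differ from the output above):

from sklearn.model_selection import KFold

kfold_shuffled = KFold(n_splits=5, shuffle=True, random_state=13)

cv_accuracy_shuffled = []
for train_idx, test_idx in kfold_shuffled.split(X):
    wine_tree_cv.fit(X.iloc[train_idx], y.iloc[train_idx])
    pred = wine_tree_cv.predict(X.iloc[test_idx])
    cv_accuracy_shuffled.append(accuracy_score(y.iloc[test_idx], pred))

np.mean(cv_accuracy_shuffled)  # the per-fold spread should shrink noticeably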

📌StratifiedKFold

from sklearn.model_selection import StratifiedKFold
skfold = StratifiedKFold(n_splits=5)
wine_tree_cv = DecisionTreeClassifier(max_depth=2, random_state=13)

cv_accuracy = []

# skfold.split needs y as well, so it can keep the class ratio equal across folds
for train_idx, test_idx in skfold.split(X, y):
    X_train = X.iloc[train_idx]
    X_test = X.iloc[test_idx]
    y_train = y.iloc[train_idx]
    y_test = y.iloc[test_idx]

    wine_tree_cv.fit(X_train, y_train)
    pred = wine_tree_cv.predict(X_test)
    cv_accuracy.append(accuracy_score(y_test, pred))

cv_accuracy
>>>
[0.5523076923076923,
 0.6884615384615385,
 0.7143956889915319,
 0.7321016166281755,
 0.7567359507313318]
np.mean(cv_accuracy)
>>>
0.6888004974240539
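
Interestingly, the stratified mean (0.689) comes out lower than the plain KFold mean (0.710) here; stratification only guarantees that every fold mirrors the overall class ratio, not a higher score. That guarantee is easy to verify directly (a sketch, not in the original note):

# overall taste ratio vs. the ratio inside each validation fold
print(y.value_counts(normalize=True).round(3).to_dict())

for _, test_idx in skfold.split(X, y):
    print(y.iloc[test_idx].value_counts(normalize=True).round(3).to_dict())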

📌Let's simplify cross validation! (This is the form you'll mostly use, so pay attention!)

- A higher depth does not automatically mean a higher score.

from sklearn.model_selection import cross_val_score

skfold = StratifiedKFold(n_splits=5)
wine_tree_cv = DecisionTreeClassifier(max_depth=5, random_state=13)

cross_val_score(wine_tree_cv, X, y, cv=skfold)
>>>
array([0.50076923, 0.62615385, 0.69745958, 0.7582756 , 0.74903772])
from sklearn.model_selection import cross_val_score

skfold = StratifiedKFold(n_splits=5)
wine_tree_cv = DecisionTreeClassifier(max_depth=2, random_state=13)

cross_val_score(wine_tree_cv, X, y, cv=skfold)
>>>
array([0.55230769, 0.68846154, 0.71439569, 0.73210162, 0.75673595])
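
Since cross_val_score just returns one score per fold, a common next step is to summarize them as a mean and standard deviation (a small sketch):

import numpy as np
from sklearn.model_selection import cross_val_score

scores = cross_val_score(wine_tree_cv, X, y, cv=skfold)
print('Acc : %.4f +/- %.4f' % (scores.mean(), scores.std()))
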
from sklearn.model_selection import cross_validate

cross_validate(wine_tree_cv, X, y, cv=skfold, return_train_score=True)
>>>
{'fit_time': array([0.01352072, 0.01101708, 0.01058698, 0.01050925, 0.0102396 ]),
 'score_time': array([0.00100255, 0.00100493, 0.0009985 , 0.00099993, 0.00099993]),
 'test_score': array([0.50076923, 0.62615385, 0.69745958, 0.7582756 , 0.74903772]),
 'train_score': array([0.78795459, 0.78045026, 0.77568295, 0.76356291, 0.76279338])}
# We are also witnessing overfitting here: train_score stays above test_score on every fold.
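
That overfitting can be quantified from the same dictionary, as the per-fold gap between train_score and test_score (sketch only):

from sklearn.model_selection import cross_validate

result = cross_validate(wine_tree_cv, X, y, cv=skfold, return_train_score=True)
gap = result['train_score'] - result['test_score']  # numpy arrays, so this is elementwise
print(gap)
print(gap.mean())  # the larger this gap, the stronger the overfitting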

📌Hyperparameter tuning (using GridSearchCV)

- For a decision tree, the only parameter worth tuning 'so far' is max_depth

- Test different values of max_depth in a loop, which is exactly what GridSearchCV automates

import pandas as pd

red = pd.read_csv('../data/winequality-red.csv', sep=';')
white = pd.read_csv('../data/winequality-white.csv', sep=';')
red['color'] = 1.
white['color'] = 0.

wine = pd.concat([red, white])
wine['taste'] = [1. if grade > 5 else 0. for grade in wine['quality']]

X = wine.drop(['taste', 'quality'], axis=1)
y = wine['taste']

from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier

params = {'max_depth': [2, 4, 7, 10]}

# max_depth=2 here is just a starting value; GridSearchCV tries each value from params
wine_tree = DecisionTreeClassifier(max_depth=2, random_state=13)
gridsearch = GridSearchCV(estimator=wine_tree, param_grid=params, cv=5)
gridsearch.fit(X, y)
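
After fit, the search object exposes the winner through the standard GridSearchCV attributes:

print(gridsearch.best_params_)     # the max_depth that scored best on CV
print(gridsearch.best_score_)      # mean cross-validated accuracy of that setting
print(gridsearch.best_estimator_)  # the tree refitted on all of X, y with the best params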

from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import StandardScaler

estimators = [
    ('scaler', StandardScaler()),
    ('clf', DecisionTreeClassifier(random_state=13))  # random_state fixed, as elsewhere in this note
]

pipe = Pipeline(estimators)
param_grid = [{'clf__max_depth': [2, 4, 7, 10]}]
GridSearch = GridSearchCV(estimator=pipe, param_grid=param_grid, cv=5)  # 5-fold cross validation
GridSearch.fit(X, y)
score_df = pd.DataFrame(GridSearch.cv_results_)
score_df[['params', 'rank_test_score', 'mean_test_score', 'std_test_score']]
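
The same attributes work for the pipeline search; the 'clf__' prefix in param_grid is what routes max_depth to the 'clf' step. A short sketch of pulling out the winner (the value in the comment is a placeholder, not an actual result):

print(GridSearch.best_params_)  # e.g. {'clf__max_depth': 2} (placeholder)
print(GridSearch.best_score_)

# best_estimator_ is the whole pipeline (scaler + tree), refitted and ready to predict
best_pipe = GridSearch.best_estimator_
print(best_pipe.predict(X[:5]))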