Cross-validation, sometimes also called rotation estimation, is a practical statistical technique for partitioning a data sample into smaller subsets; the underlying theory was introduced by Seymour Geisser.
Given a modeling sample, most of the observations are used to fit a model, and the remaining small portion is predicted with the freshly fitted model; the prediction errors on that held-out portion are recorded as a sum of squares. The procedure is repeated until every observation has been predicted exactly once. The sum of the squared prediction errors over all observations is called the PRESS (Predicted Residual Error Sum of Squares).
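As a concrete illustration, the following minimal sketch (not part of the original post) computes PRESS by leave-one-out prediction with scikit-learn; the linear-regression model and the synthetic data are assumptions made purely for the example.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import LeaveOneOut

# Synthetic regression data, assumed only for this illustration.
X, y = make_regression(n_samples=50, n_features=3, noise=1.0, random_state=0)

press = 0.0
for train_idx, test_idx in LeaveOneOut().split(X):
    model = LinearRegression().fit(X[train_idx], y[train_idx])
    # Each observation is predicted exactly once, by a model that never saw it.
    press += (model.predict(X[test_idx])[0] - y[test_idx][0]) ** 2

print('PRESS =', press)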
The most commonly used accuracy test is cross-validation, for example 10-fold cross-validation: the data set is split into ten parts, and in turn 9 parts are used for training and 1 part for validation; the mean of the 10 results serves as the estimate of the algorithm's accuracy. Usually the 10-fold procedure is itself repeated several times and the results averaged, for example 10 repetitions of 10-fold cross-validation, to obtain a more precise estimate. The advantage of this approach is that the randomly generated subsamples are repeatedly used for both training and validation, and each result is validated once.
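One compact way to express this repeated 10-fold procedure in scikit-learn is sketched below; the iris data and the linear SVC reappear later in this post, while RepeatedKFold is assumed here as a convenience that handles the repetition and shuffling.

import numpy as np
from sklearn import datasets, svm
from sklearn.model_selection import RepeatedKFold, cross_val_score

X, y = datasets.load_iris(return_X_y=True)
clf = svm.SVC(kernel='linear', C=5)

# 10 repetitions of 10-fold cross-validation: 100 scores in total.
cv = RepeatedKFold(n_splits=10, n_repeats=10, random_state=0)
scores = cross_val_score(clf, X, y, cv=cv)
print('mean accuracy:', scores.mean(), 'std:', scores.std())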
"""这里是正确的代码""" def cross_10folds(data,folds,jiange,start_index,end_index):
df_test=data[start_index*jiange:end_index*jiange] #数据刚好可以做10折交叉验证。
df_test_index=list(df_test.index) df_test_flag=data.index.isin(df_test_index)
#都转换为list来判定成员资格 diff_flag = [not f for f in df_test_flag] #
不是df_test_flag里面的索引的索引记为df_train_index的索引集合 df_train= data[diff_flag] return
df_train,df_test path='C:/Users/Administrator/Desktop/zhou1.csv'
columns1=['cid','side_effects'] import pandas as pd
#data=pd.read_csv(path)[:100] #经验证划分完全正确 data=pd.read_csv(path)
data.columns=columns1 folds=10 jiange=int(data.shape[0]/folds) #10次10折交叉验证的代码
for i in range(1,11): #将数据集随机打散 data=data.sample(frac = 1) #随机打乱样本 for i in
range(1,folds+1): df_train,df_test=cross_10folds(data,folds,jiange,i-1,i)
#起始和尾部索引
df_train.to_csv('C:/Users/Administrator/Desktop/zhou/train_'+str(i)+'.csv',index=True,header=True)
#检验对了再改成False
df_test.to_csv('C:/Users/Administrator/Desktop/zhou/test_'+str(i)+'.csv',index=True,header=True)
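As a quick sanity check (not part of the original post), the split that the comments describe as verified can be asserted directly; `data`, `df_train`, and `df_test` below refer to the variables produced by the code above.

# Every row ends up in exactly one of the two parts, and the parts never overlap.
assert len(df_train) + len(df_test) == len(data)
assert set(df_train.index).isdisjoint(set(df_test.index))
assert set(df_train.index) | set(df_test.index) == set(data.index)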
1. MATLAB implementation:
% 10-fold cross-validation
[m,n]=size(train_bags);                % m is the number of protein bags
indices=crossvalind('Kfold',m,10);
temp_Hamming_Loss=zeros(1,10);
temp_Macro_F1=zeros(1,10);
temp_Micro_F1=zeros(1,10);
for k=1:1:10
    test=(indices==k);                 % logical mask of the k-th fold, used as the validation set
    train=~test;
    train_data=train_bags(train,:);    % training-set bags
    train_target=train_lable(:,train); % training-set labels
    test_bags=train_bags(test,:);      % test-set bags for this fold
    test_target=train_lable(:,test);   % test-set labels for this fold
    % ... train the classifier on (train_data, train_target), evaluate it on
    % (test_bags, test_target), and store the metrics, e.g.
    % temp_Hamming_Loss(k)=...; temp_Macro_F1(k)=...; temp_Micro_F1(k)=...;
end
2. Python 3 implementation:
2.1. Printing a simple 5-fold cross-validation split (without shuffling)
# Print the splits of a simple 5-fold cross-validation, without shuffling
import numpy as np
from sklearn.model_selection import KFold   # sklearn.cross_validation has been removed; use model_selection instead
kf = KFold(n_splits=5, shuffle=False)
for eachfold in kf.split(np.arange(150)):
    print(eachfold, '=========')
# kf.split(...) yields 5 (train_indices, test_indices) tuples, one per fold of the 5-fold split.

In each printed tuple, the first array holds the train-set indices and the second the test-set indices:
(Output abridged; in practice the full index arrays are printed.) With shuffle=False the test folds are consecutive blocks of 30 indices:

fold 1: test = [0 .. 29],    train = [30 .. 149]
fold 2: test = [30 .. 59],   train = [0 .. 29] and [60 .. 149]
fold 3: test = [60 .. 89],   train = [0 .. 59] and [90 .. 149]
fold 4: test = [90 .. 119],  train = [0 .. 89] and [120 .. 149]
fold 5: test = [120 .. 149], train = [0 .. 119]
2.2. Shuffling the data and running 10 repetitions of 10-fold cross-validation
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn import svm
from sklearn.model_selection import StratifiedKFold, cross_val_score   # sklearn.cross_validation has been removed

iris = datasets.load_iris()
X = iris.data
Y = iris.target

def tenFolds(X, Y):
    # X: features, Y: targets
    skf = StratifiedKFold(n_splits=10)
    clf = svm.SVC(kernel='linear', C=5)
    zhou = []
    #shoushou = list(skf.split(X, Y))   # each element is a (train_index, test_index) tuple
    for train_index, test_index in skf.split(X, Y):
        print('Train: ', train_index, 'Test: ', test_index)
        X_train, X_test = X[train_index], X[test_index]
        Y_train, Y_test = Y[train_index], Y[test_index]
        # the score recorded for this fold is a nested 5-fold cross_val_score on the training portion
        v10 = cross_val_score(clf, X_train, Y_train, cv=5).mean()
        zhou.append(v10)
    print('mean:', np.mean(zhou))
    print('std:', np.std(zhou))
    return zhou, np.mean(zhou) + np.std(zhou)

# 10 repetitions of 10-fold cross-validation, reshuffling the data before each repetition
all_valid = []
mean_var = []
for i in range(1, 11):
    df1 = pd.DataFrame(X)
    df2 = pd.DataFrame(Y)
    df3 = pd.concat([df1, df2], axis=1)
    df3.columns = ['f1', 'f2', 'f3', 'f4', 'label']
    df3 = df3.sample(frac=1)           # randomly shuffle the samples
    df3.index = range(150)
    X1 = np.array(df3[['f1', 'f2', 'f3', 'f4']])
    Y1 = np.array(df3['label'])
    zhou, vsd = tenFolds(X1, Y1)
    all_valid.append(zhou)
    mean_var.append(vsd)
print(np.mean(mean_var))
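Note a design quirk of this implementation: the held-out StratifiedKFold fold (X_test, Y_test) is never scored directly. The value recorded for each fold is the mean of a nested 5-fold cross_val_score computed on the training portion, so the numbers below estimate training-side cross-validation accuracy rather than accuracy on the held-out fold.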
all_valid (each inner list holds the 10 per-fold scores from one of the 10 repetitions):
[0.9777777777777776, 0.962962962962963, 0.962962962962963, 0.9703703703703702,
0.9703703703703702, 0.9703703703703702, 0.9777777777777779, 0.9555555555555555,
0.9555555555555555, 0.9703703703703702] [0.9555555555555555,
0.9703703703703704, 0.962962962962963, 0.9555555555555555, 0.9703703703703704,
0.962962962962963, 0.9703703703703702, 0.9925925925925926, 0.9703703703703702,
0.9777777777777776] [0.9703703703703704, 0.9777777777777779,
0.9555555555555555, 0.9703703703703704, 0.9703703703703704, 0.9481481481481481,
0.9703703703703704, 0.962962962962963, 0.962962962962963, 0.9777777777777779]
[0.9703703703703702, 0.9703703703703702, 0.962962962962963, 0.9851851851851852,
0.962962962962963, 0.962962962962963, 0.9703703703703702, 0.9777777777777779,
0.9703703703703702, 0.9703703703703702] [0.9777777777777779, 0.962962962962963,
0.9555555555555555, 0.962962962962963, 0.962962962962963, 0.9555555555555555,
0.9703703703703702, 0.9777777777777776, 0.9407407407407407, 0.9777777777777776]
[0.9481481481481481, 0.9703703703703704, 0.9777777777777779,
0.9407407407407409, 0.962962962962963, 0.9555555555555555, 0.9555555555555555,
0.962962962962963, 0.9703703703703702, 0.9851851851851852] [0.9777777777777776,
0.9777777777777776, 0.9777777777777776, 0.9703703703703704, 0.9925925925925926,
0.9851851851851852, 0.9777777777777779, 0.9703703703703704, 0.9703703703703704,
0.9851851851851852] [0.962962962962963, 0.9703703703703702, 0.9703703703703704,
0.962962962962963, 0.9703703703703702, 0.9777777777777779, 0.9481481481481481,
0.9555555555555555, 0.9481481481481481, 0.9555555555555557] [0.962962962962963,
0.9703703703703702, 0.9555555555555555, 0.9703703703703704, 0.9703703703703702,
0.962962962962963, 0.9703703703703702, 0.962962962962963, 0.9555555555555555,
0.962962962962963] [0.9703703703703702, 0.962962962962963, 0.9555555555555555,
0.962962962962963, 0.9703703703703702, 0.9703703703703702, 0.9777777777777779,
0.9851851851851852, 0.9777777777777779, 0.9703703703703702]
mean_var (mean plus standard deviation of the 10 fold scores for each repetition):
0.9749615103905077 0.9792592592592592 0.9755863663546611 0.9769957569703696
0.975823919626472 0.9757929689449547 0.9855066526904122 0.9718518518518519
0.9699876405729984 0.9784847786297061
Stratified cross-validation and handling imbalanced data sets:
https://blog.csdn.net/asialee_bird/article/details/83714612
Python SMOTE example: generating imbalanced data samples with sklearn's make_classification
https://blog.csdn.net/levy_cui/article/details/86707049