在机器学习实践中,数据集是算法验证和模型训练的基础。根据任务类型选择合适的数据集不仅能提高开发效率,还能确保模型评估的客观性。本文整理覆盖回归、分类、图像、文本四大领域的10多个经典数据集,并提供可直接运行的代码示例。
来源：scikit-learn 内置数据集。
from sklearn.datasets import fetch_california_housing
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

# Load the California housing dataset. The original line called a
# misspelled name (fetch_cifornia_housing) that raised NameError.
data = fetch_california_housing()
X, y = data.data, data.target

# Hold out 20% of the rows for evaluation.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Fit an ordinary least-squares linear regression.
model = LinearRegression()
model.fit(X_train, y_train)

# Report mean squared error on the held-out split.
preds = model.predict(X_test)
print(f'MSE: {mean_squared_error(y_test, preds):.2f}')
来源：scikit-learn 内置数据集。
from sklearn.datasets import load_diabetes
# Load the diabetes regression dataset bundled with scikit-learn.
dataset = load_diabetes()
X, y = dataset.data, dataset.target
特征名称 | 描述 | 均值 | 标准差 |
---|---|---|---|
age | 年龄 | 48.52 | 23.61 |
bmi | 体重指数 | 26.38 | 4.31 |
bp | 平均血压 | 94.65 | 13.28 |
import pandas as pd

# NOTE: the URL must not contain a trailing space (the original did),
# otherwise pandas requests a non-existent resource and read_csv fails.
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00275/Bike-Sharing-Dataset/hour.csv"
df = pd.read_csv(url)

# 'cnt' (total rentals per hour) is the regression target.
X = df.drop('cnt', axis=1)
y = df['cnt']
描述:包含7万张28x28像素的手写数字图像,用于图像分类
来源:torchvision
或keras
内置数据集
代码示例(PyTorch):
from torchvision import datasets, transforms
import matplotlib.pyplot as plt

# Download the MNIST training split, converting images to tensors.
to_tensor = transforms.Compose([transforms.ToTensor()])
train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=to_tensor)

# Plot the first nine digits with their labels in a 3x3 grid.
fig, axes = plt.subplots(3, 3, figsize=(8, 8))
for idx, axis in enumerate(axes.flat):
    axis.imshow(train_dataset.data[idx], cmap='gray')
    axis.set_title(f"Label: {train_dataset.targets[idx]}")
plt.tight_layout()
plt.show()
from torchvision import datasets, transforms

# Preprocessing pipeline: small random rotation for augmentation,
# tensor conversion, then normalization with the standard MNIST
# mean/std (0.1307 / 0.3081).
transform = transforms.Compose([
    transforms.RandomRotation(10),
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])

# Download and wrap the training split with the pipeline attached.
train_dataset = datasets.MNIST(
    root='./data', train=True, download=True, transform=transform
)
预处理流程:
描述:包含乘客信息及生存状态,用于二分类(生存/遇难)
来源:Kaggle或UCI机器学习库
代码示例:
import pandas as pd
from sklearn.ensemble import RandomForestClassifier

# Load the Titanic passenger table.
url = "https://web.stanford.edu/class/archive/cs/cs109/cs109.1166/stuff/titanic.csv"
df = pd.read_csv(url)

# Fill missing ages with the median age.
df['Age'] = df['Age'].fillna(df['Age'].median())

# Feature matrix (get_dummies is a no-op on these numeric columns but
# keeps the pipeline ready for categorical additions) and target.
X = pd.get_dummies(df[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']])
y = df['Survived']

# Fit a random forest on all rows.
model = RandomForestClassifier()
model.fit(X, y)
import pandas as pd
from sklearn.impute import SimpleImputer

# Replace missing ages with the column median (assumes `df` from the
# previous snippet is in scope).
median_imputer = SimpleImputer(strategy='median')
df['Age'] = median_imputer.fit_transform(df[['Age']])

# Family size = siblings/spouses + parents/children + the passenger.
df['FamilySize'] = df['SibSp'] + df['Parch'] + 1
import pandas as pd

# URL fixed: the original string carried a trailing space, which makes
# the HTTP request fail.
url = "https://web.stanford.edu/class/archive/cs/cs109/cs109.1166/stuff/titanic.csv"
df = pd.read_csv(url)

# Impute with the per-column mean of the selected numeric features.
# The original used fillna(df.mean()), but DataFrame.mean() over the
# whole frame raises a TypeError on non-numeric columns (pandas >= 2.0).
features = df[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']]
X = features.fillna(features.mean())
y = df['Survived']
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt  # was missing: plt.barh below needs it

# Fill missing ages before training (assumes `df`, `pd`, `y` from the
# previous snippet are in scope).
df['Age'] = df['Age'].fillna(df['Age'].median())
X = pd.get_dummies(df[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']])

# Fit the forest on all rows.
model = RandomForestClassifier()
model.fit(X, y)

# Horizontal bar chart of the learned feature importances.
plt.barh(X.columns, model.feature_importances_)
import pandas as pd

# URL fixed (no trailing space). skipinitialspace strips the blank that
# follows each comma in adult.data — without it, values arrive as
# " >50K" and the label comparison below matches nothing.
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data"
df = pd.read_csv(url, header=None, skipinitialspace=True,
                 names=['age', 'workclass', 'fnlwgt', 'education', 'education-num',
                        'marital-status', 'occupation', 'relationship', 'race', 'sex',
                        'capital-gain', 'capital-loss', 'hours-per-week',
                        'native-country', 'income'])
X = df.drop('income', axis=1)
# Binary target: 1 if income is above 50K, else 0.
y = (df['income'] == '>50K').astype(int)
from sklearn.preprocessing import OneHotEncoder

# Columns that need one-hot encoding (assumes `df` from the previous
# snippet is in scope).
cat_cols = ['workclass', 'education', 'marital-status']

# handle_unknown='ignore' keeps transform() from failing when unseen
# categories show up at prediction time.
encoder = OneHotEncoder(handle_unknown='ignore')
X_cat = encoder.fit_transform(df[cat_cols])
来源：torchvision 或 keras 内置数据集。
代码示例（Keras）：
from tensorflow.keras.datasets import cifar10
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
from torchvision import datasets, transforms

# Convert to tensor, then map each RGB channel from [0, 1] to [-1, 1]
# via (x - 0.5) / 0.5.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

# Download the CIFAR-10 training split with the pipeline attached.
train_dataset = datasets.CIFAR10(root='./data', train=True,
                                 download=True, transform=transform)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# On-the-fly augmentation: rotate up to 15 degrees, shift up to 10%
# of the width/height in either direction, and mirror horizontally.
datagen = ImageDataGenerator(
rotation_range=15,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True)
来源：torchvision 或 keras 内置数据集。
代码示例（PyTorch）：
from torchvision import datasets, transforms
# Download both Fashion-MNIST splits, converting images to tensors.
to_tensor = transforms.Compose([transforms.ToTensor()])
train_dataset = datasets.FashionMNIST(root='./data', train=True, download=True, transform=to_tensor)
test_dataset = datasets.FashionMNIST(root='./data', train=False, download=True, transform=to_tensor)
类别标签 | 类别名称 | 样本数量 |
---|---|---|
0 | T-shirt/top | 7,000 |
1 | Trouser | 7,000 |
2 | Pullover | 7,000 |
来源：torchvision 内置数据集。
from torchvision import datasets, transforms
# Download the Oxford-IIIT Pet dataset, converting images to tensors.
pipeline = transforms.Compose([transforms.ToTensor()])
dataset = datasets.OxfordIIITPet(root='./data', download=True, transform=pipeline)
import torch
from torchvision.models import resnet50

# Load ImageNet-pretrained weights (downloads on first use).
model = resnet50(pretrained=True)

# Replace the classifier head with a 37-way output layer (one class per
# Oxford-IIIT Pet breed). Reading in_features from the existing layer
# avoids hard-coding 2048 and survives a change of backbone.
model.fc = torch.nn.Linear(model.fc.in_features, 37)
来源：keras 内置数据集。
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.datasets import imdb  # was missing: imdb.load_data below needs it
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

# Keep only the 10,000 most frequent words; pad/truncate each review
# to 200 tokens (pad_sequences is imported above).
(X_train, y_train), _ = imdb.load_data(num_words=10000)
X_train = pad_sequences(X_train, maxlen=200)

# Embedding -> LSTM -> sigmoid head for binary sentiment classification.
model = Sequential([
    Embedding(10000, 128),
    LSTM(64),
    Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=3, batch_size=64)
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Cap every review at 500 tokens; shorter ones are zero-padded
# (assumes X_train / X_test from an earlier snippet are in scope).
maxlen = 500
X_train, X_test = (pad_sequences(split, maxlen=maxlen)
                   for split in (X_train, X_test))
来源：torchtext 或直接下载。
import pandas as pd
# URL fixed: the original string ended with a space, which makes the
# request target a non-existent file.
url = "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/train.csv"
df = pd.read_csv(url, header=None, names=['label', 'title', 'description'])
X = df['description']
y = df['label'] - 1  # AG News labels are 1-4; shift to 0-3
import pandas as pd

# URL fixed (no trailing space). THUCNews ships as a zip of
# per-category folders of .txt files; download and unzip it manually.
url = "http://thuctc.thunlp.org/source/THUCNews.zip"
df = pd.read_csv('THUCNews/财经/财经_0.txt', header=None, names=['text'])
y = 0  # label assigned manually from the folder (category) name
import jieba

# Segment a Chinese sentence into words and join them with slashes.
text = "清华大学自然语言处理实验室发布最新研究成果"
print("/".join(jieba.cut(text)))  # e.g. 清华大学/自然语言/处理/实验室/发布/最新/研究成果
import pandas as pd

# URL fixed: trailing space removed.
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data"
df = pd.read_csv(url, header=None)

# Column 0 is the sample ID and column 1 the diagnosis (M/B); neither
# is a measurement, so drop both from the feature matrix. The original
# kept the ID column as a (meaningless) feature.
X = df.drop([0, 1], axis=1)
y = (df[1] == 'M').astype(int)  # 1 = malignant, 0 = benign
import seaborn as sns
import matplotlib.pyplot as plt

# numeric_only=True: the frame still holds the string diagnosis column,
# and DataFrame.corr raises on non-numeric data in pandas >= 2.0.
corr_matrix = df.corr(numeric_only=True)
sns.heatmap(corr_matrix, annot=True, cmap='coolwarm')
plt.title('Feature Correlation Matrix')
代码示例（需要先安装 pycocotools）：
from pycocotools.coco import COCO
# Build the COCO annotation index and list all image ids.
ann_file = 'instances_train2017.json'
coco = COCO(ann_file)
imgIds = coco.getImgIds()
from pycocotools.coco import COCO
import matplotlib.pyplot as plt

# Load the annotation index.
coco = COCO('instances_train2017.json')

# Pick the first image and fetch its annotations (`imgIds` was
# referenced without being defined in the original snippet).
img_ids = coco.getImgIds()
img = coco.loadImgs(img_ids[0])[0]
annIds = coco.getAnnIds(imgIds=img['id'])
anns = coco.loadAnns(annIds)

# Read the pixels from disk; `image` was previously undefined.
# Assumes the train2017 images sit alongside the annotation file —
# adjust the directory to your layout.
image = plt.imread('train2017/' + img['file_name'])
plt.imshow(image)
coco.showAnns(anns)
import json

# Parse SQuAD v2 and pull the paragraphs of the first article.
with open('train-v2.0.json', 'r') as f:
    squad_data = json.load(f)

paragraphs = squad_data['data'][0]['paragraphs']
contexts = [p['context'] for p in paragraphs]
# Questions attached to the first paragraph only.
questions = [qa['question'] for qa in paragraphs[0]['qas']]
领域 | 数据集名称 | 任务类型 | 样本量 | 特点 |
---|---|---|---|---|
回归 | 加州房价 | 房价预测 | 20,640 | 包含地理位置、人口密度等特征,适合特征工程研究 |
回归 | 糖尿病 | 疾病进展预测 | 442 | 小数据集,适合快速验证模型 |
分类 | MNIST | 手写数字识别 | 70,000 | 图像分类入门首选,28x28灰度图像 |
分类 | 泰坦尼克号 | 生存预测 | 891 | 结构化数据,包含类别和数值特征,适合数据预处理练习 |
图像分类 | CIFAR-10 | 物体分类 | 60,000 | 32x32彩色图像,10个类别,适合CNN基础实验 |
图像分类 | Oxford-IIIT Pet | 细粒度分类 | 7,390 | 包含宠物品种和属性标注,适合细粒度视觉任务 |
文本分类 | IMDb | 情感分析 | 50,000 | 影评二分类,适合文本深度学习入门 |
文本分类 | AG News | 新闻分类 | 120,000 | 4个大类,包含标题和描述字段,适合文本分类迁移学习 |
中文文本分类 | THUCNews | 新闻分类 | 740,000 | 14个中文新闻类别,适合中文NLP研究 |
通用数据集 | UCI机器学习库 | 分类/回归/聚类 | 数百个数据集 | 涵盖医疗、金融、教育等领域,适合学术研究和竞赛 |
选择标准 | 推荐数据集 | 优势 |
---|---|---|
快速验证 | 糖尿病数据集 | 样本量小(442),训练速度快 |
图像分类入门 | Fashion-MNIST | 清晰类别划分,28x28标准尺寸 |
中文NLP研究 | THUCNews | 74万样本覆盖14个中文新闻类别 |
结构化数据实践 | 泰坦尼克号数据集 | 包含混合类型特征,适合预处理练习 |
领域 | 数据集名称 | 任务类型 | 样本量 | 典型算法 |
---|---|---|---|---|
回归 | 加州房价 | 房价预测 | 20,640 | 线性回归、GBDT |
分类 | MNIST | 手写数字识别 | 70,000 | CNN、SVM |
图像分类 | CIFAR-10 | 物体分类 | 60,000 | ResNet、Vision Transformer |
文本分类 | IMDb | 情感分析 | 50,000 | LSTM、BERT |
目标检测 | COCO | 实例分割 | 330,000 | Mask R-CNN、YOLO |
以上数据集覆盖了回归、分类、图像、文本等多个领域，且大部分可通过 scikit-learn、torchvision、keras 等库直接加载或从公开渠道获取。实际应用中，可根据任务类型（如房价预测、情感分析）和数据特点（如结构化数据、图像、文本）选择合适的数据集，并结合数据预处理（如归一化、分词）和模型调优（如超参数调整）进行实验。
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

# Load the Titanic passenger table and fill missing ages with the median.
url = "https://web.stanford.edu/class/archive/cs/cs109/cs109.1166/stuff/titanic.csv"
df = pd.read_csv(url)
df['Age'] = df['Age'].fillna(df['Age'].median())

# Feature matrix and binary survival target.
X = pd.get_dummies(df[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']])
y = df['Survived']

# Fit a 100-tree random forest.
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X, y)

# NOTE: this scores the model on its own training rows, so the
# accuracy printed here is optimistic by construction.
predictions = clf.predict(X)
print(f"Training Accuracy: {accuracy_score(y, predictions):.2f}")