# Linear LDA classification on a synthetic two-class data set
import numpy as np
import matplotlib.pyplot as plt
# sklearn.datasets.samples_generator was removed in scikit-learn 0.24;
# make_classification is importable directly from sklearn.datasets
from sklearn.datasets import make_classification
class LDA():
    """Two-class Fisher linear discriminant analysis.

    Train() learns the projection direction w = Sw^-1 (m1 - m2);
    Test() classifies by thresholding projected samples at the midpoint
    of the projected class means (h >= 0 -> class 0, else class 1).
    """

    def Train(self, X, y):
        """Fit the discriminant on samples X (n, d) with binary labels y (0/1)."""
        X = np.asarray(X)
        y = np.asarray(y)
        X1 = X[y == 0]  # samples of class 0
        X2 = X[y == 1]  # samples of class 1
        # class centroids
        m1 = np.mean(X1, axis=0)
        m2 = np.mean(X2, axis=0)
        # per-class scatter matrices (unnormalized covariances)
        cov1 = (X1 - m1).T @ (X1 - m1)
        cov2 = (X2 - m2).T @ (X2 - m2)
        Sw = cov1 + cov2
        # np.mat(...).I is deprecated; solve the linear system directly
        w = np.linalg.solve(Sw, (m1 - m2)).reshape(-1, 1)
        self.m1 = m1
        self.cov1 = cov1
        self.m2 = m2
        self.cov2 = cov2
        self.Sw = Sw  # within-class scatter matrix
        self.w = w    # discriminant weight vector, shape (d, 1)

    def Test(self, X, y):
        """Classify X and return the accuracy against labels y."""
        X = np.asarray(X)
        y = np.asarray(y)
        nums = len(y)
        if nums == 0:
            # empty test set: nothing to score
            return 0.0
        # project the samples onto the discriminant direction
        y_new = (X @ self.w).ravel()
        # decision threshold: projection of the midpoint between the means
        # c = 0.5 * (m1 - m2)^T Sw^-1 (m1 + m2) == 0.5 * w^T (m1 + m2)
        c = 0.5 * float((self.m1 + self.m2) @ self.w)
        # w points from m2 towards m1, so h >= 0 maps to class 0
        y_hat = np.where(y_new - c >= 0, 0, 1)
        precise = float(np.mean(y_hat == y))
        return precise
if '__main__' == __name__:
    n_samples = 500
    # synthetic two-feature, two-class data with a fixed seed
    X, y = make_classification(n_samples=n_samples, n_features=2, n_redundant=0, n_classes=2,
                               n_informative=1, n_clusters_per_class=1, class_sep=0.5, random_state=10)
    lda = LDA()
    # first 300 samples train, remaining 200 test
    # (the original X[:299] / X[300:] split silently dropped sample 299)
    Xtrain = X[:300, :]
    Ytrain = y[:300]
    Xtest = X[300:, :]
    Ytest = y[300:]
    lda.Train(Xtrain, Ytrain)
    precise = lda.Test(Xtest, Ytest)
    # scatter plot of the raw data; title carries the test accuracy
    plt.scatter(X[:, 0], X[:, 1], marker='o', c=y)
    plt.xlabel("x1")
    plt.ylabel("x2")
    plt.title("Test precise:" + str(precise))
    plt.show()
# K-means clustering on the iris data set
from sklearn import datasets
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

# load_iris returns a dict-like Bunch with .data (150 x 4) and .target
iris_df = datasets.load_iris()
# use the 1st and 3rd feature columns as the plot axes
# (any other pair of dimensions would work as well)
x_axis = iris_df.data[:, 0]
y_axis = iris_df.data[:, 2]
# the cluster count is fixed here; tune n_clusters for other data
model = KMeans(n_clusters=2)
# fit the clustering model on all four features
model.fit(iris_df.data)
# predict the cluster of one hand-picked sample (values of data row 100)
# NOTE: fixed the original variable typo 'prddicted_label'
predicted_label = model.predict([[6.3, 3.3, 6, 2.5]])
# cluster assignments for all 150 samples
all_predictions = model.predict(iris_df.data)
# scatter plot coloured by cluster assignment
plt.scatter(x_axis, y_axis, c=all_predictions)
plt.show()
# SVM analysis on the iris data set
# (fetch_openml import retained from the original MNIST example; unused here)
from sklearn.datasets import fetch_openml
import numpy as np
import os

# make this script's output stable across runs
np.random.seed(42)

# plotting setup
# NOTE: the original contained '%matplotlib inline', an IPython magic that
# is a SyntaxError in a plain .py file; it is only needed inside notebooks
# and has been removed.
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# configure matplotlib so Chinese characters render in labels/titles
mpl.rcParams['font.sans-serif'] = [u'SimHei']
mpl.rcParams['axes.unicode_minus'] = False

from sklearn.svm import SVC
from sklearn import datasets

iris = datasets.load_iris()
X = iris["data"][:, (2, 3)]  # petal length, petal width
y = iris["target"]
# keep only the two linearly separable classes (setosa vs versicolor)
setosa_or_versicolor = (y == 0) | (y == 1)
X = X[setosa_or_versicolor]
y = y[setosa_or_versicolor]

# hard-margin linear SVM (C = inf -> no margin violations allowed)
svm_clf = SVC(kernel="linear", C=float("inf"))
svm_clf.fit(X, y)
def plot_svc_decision_boundary(svm_clf, xmin, xmax):
    """Draw a fitted linear SVC's decision line, margin gutters and support vectors."""
    # decision boundary: w0*x0 + w1*x1 + b = 0  =>  x1 = -w0/w1 * x0 - b/w1
    weights = svm_clf.coef_[0]
    bias = svm_clf.intercept_[0]
    xs = np.linspace(xmin, xmax, 200)
    boundary = -weights[0] / weights[1] * xs - bias / weights[1]
    # gutters sit where the decision function equals +/-1, i.e. offset 1/w1 in x1
    offset = 1 / weights[1]
    upper_gutter = boundary + offset
    lower_gutter = boundary - offset
    # highlight the support vectors first so the lines are drawn on top
    support = svm_clf.support_vectors_
    plt.scatter(support[:, 0], support[:, 1], s=180, facecolors='#FFAAAA')
    plt.plot(xs, boundary, "k-", linewidth=2)
    plt.plot(xs, upper_gutter, "k--", linewidth=2)
    plt.plot(xs, lower_gutter, "k--", linewidth=2)
# Hand-drawn "bad" separators (left) vs. the large-margin SVM boundary (right)
line_x = np.linspace(0, 5.5, 200)
# three arbitrary straight lines that separate the data poorly (or not at all)
arbitrary_lines = (
    (5 * line_x - 20, "g--"),
    (line_x - 1.8, "m-"),
    (0.1 * line_x + 0.5, "r-"),
)

plt.figure(figsize=(12, 2.7))

plt.subplot(121)
plt.title("随便画的", fontsize=16)
for ys, fmt in arbitrary_lines:
    plt.plot(line_x, ys, fmt, linewidth=2)
plt.plot(X[:, 0][y == 1], X[:, 1][y == 1], "bs", label="Iris-Versicolor")
plt.plot(X[:, 0][y == 0], X[:, 1][y == 0], "yo", label="Iris-Setosa")
plt.xlabel("花瓣长度", fontsize=14)
plt.ylabel("花瓣宽度", fontsize=14)
plt.legend(loc="upper left", fontsize=14)
plt.axis([0, 5.5, 0, 2])

plt.subplot(122)
plt.title("大间隔分类", fontsize=16)
plot_svc_decision_boundary(svm_clf, 0, 5.5)
plt.plot(X[:, 0][y == 1], X[:, 1][y == 1], "bs")
plt.plot(X[:, 0][y == 0], X[:, 1][y == 0], "yo")
plt.xlabel("Petal length", fontsize=14)
plt.axis([0, 5.5, 0, 2])
plt.show()
#月亮数据集的线性LDA分析
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
class LDA():
    """Two-class Fisher linear discriminant for the moons data.

    Train() learns w = Sw^-1 (m1 - m2); Test() thresholds projected
    samples at the projected midpoint of the class means, prints a short
    report and returns the accuracy.
    """

    def Train(self, X, y):
        """Fit the discriminant on samples X (n, d) with binary labels y (0/1)."""
        X = np.asarray(X)
        y = np.asarray(y)
        X1 = X[y == 0]
        X2 = X[y == 1]
        # class centroids
        m1 = np.mean(X1, axis=0)
        m2 = np.mean(X2, axis=0)
        # per-class scatter matrices
        cov1 = (X1 - m1).T @ (X1 - m1)
        cov2 = (X2 - m2).T @ (X2 - m2)
        Sw = cov1 + cov2
        # np.mat(...).I is deprecated; solve the linear system directly
        w = np.linalg.solve(Sw, (m1 - m2)).reshape(-1, 1)
        self.m1 = m1
        self.cov1 = cov1
        self.m2 = m2
        self.cov2 = cov2
        self.Sw = Sw  # within-class scatter matrix
        self.w = w    # discriminant weight vector

    def Test(self, X, y):
        """Classify X, print sample counts/accuracy, and return the accuracy."""
        X = np.asarray(X)
        y = np.asarray(y)
        nums = len(y)
        if nums == 0:
            # empty test set: nothing to score
            return 0.0
        # Fisher projection of the samples
        y_new = (X @ self.w).ravel()
        # threshold: projected midpoint of the two class centres
        c = 0.5 * float((self.m1 + self.m2) @ self.w)
        # h >= 0 -> class 0, else class 1
        y_hat = np.where(y_new - c >= 0, 0, 1)
        count = int(np.sum(y_hat == y))
        # exact division: the original divided by (nums + 0.000001), so a
        # perfect classification came out slightly below 1.0
        precise = count / nums
        print("测试样本数量:", nums)
        print("预测正确样本的数量:", count)
        print("测试准确度:", precise)
        return precise
if '__main__' == __name__:
    X, y = make_moons(n_samples=200, noise=0.15, random_state=42)
    lda = LDA()
    # disjoint split: first 120 samples train, last 80 test
    # (the original used Xtest = X[80:], which overlapped the training set
    # on samples 80-119 and inflated the reported accuracy)
    Xtrain = X[:120, :]
    Ytrain = y[:120]
    Xtest = X[120:, :]
    Ytest = y[120:]
    lda.Train(Xtrain, Ytrain)
    precise = lda.Test(Xtest, Ytest)
    # scatter plot of the raw data; title carries the test accuracy
    plt.scatter(X[:, 0], X[:, 1], marker='o', c=y)
    plt.xlabel("x1")
    plt.ylabel("x2")
    plt.title("Test precise:" + str(precise))
    plt.show()
# K-means clustering on the two-moons data set
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from sklearn.datasets import make_moons

X, y = make_moons(n_samples=100, noise=0.15, random_state=42)
# build and fit a 2-cluster K-means model
model = KMeans(n_clusters=2)
model.fit(X)
# cluster index assigned to each sample
label_pred = model.labels_
# draw each cluster with its own colour and marker
for idx, (colour, mark) in enumerate((("red", "o"), ("green", "*"))):
    members = X[label_pred == idx]
    plt.scatter(members[:, 0], members[:, 1], c=colour, marker=mark,
                label="label%d" % idx)
plt.legend(loc=2)
plt.show()
# SVM analysis on the two-moons data set
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
import numpy as np
import matplotlib as mpl
from sklearn.datasets import make_moons
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
# configure matplotlib so Chinese characters render in titles/labels
mpl.rcParams['font.sans-serif'] = [u'SimHei']
mpl.rcParams['axes.unicode_minus'] = False
# 100 noisy two-moons samples; fixed seed for reproducibility
X, y = make_moons(n_samples=100, noise=0.15, random_state=42)
def plot_dataset(X, y, axes):
    """Scatter-plot a 2-feature binary data set inside the given axis box."""
    # class 0 as blue squares, class 1 as green triangles
    zeros = y == 0
    ones = y == 1
    plt.plot(X[zeros, 0], X[zeros, 1], "bs")
    plt.plot(X[ones, 0], X[ones, 1], "g^")
    plt.axis(axes)
    plt.grid(True, which='both')
    plt.xlabel(r"$x_1$", fontsize=20)
    plt.ylabel(r"$x_2$", fontsize=20, rotation=0)
    plt.title("月亮数据", fontsize=20)
# show the raw data before fitting
plot_dataset(X, y, [-1.5, 2.5, -1, 1.5])
plt.show()
polynomial_svm_clf = Pipeline([
    # expand the 2 raw features into degree-3 polynomial features
    ("poly_features", PolynomialFeatures(degree=3)),
    # standardize features before the linear SVM
    ("scaler", StandardScaler()),
    # linear SVC on the expanded features
    ("svm_clf", LinearSVC(C=10, loss="hinge", random_state=42))
])
polynomial_svm_clf.fit(X, y)
def plot_predictions(clf, axes):
    """Shade a classifier's predicted classes and decision values over a grid."""
    # 100x100 grid covering the [xmin, xmax, ymin, ymax] axis box
    xs = np.linspace(axes[0], axes[1], 100)
    ys = np.linspace(axes[2], axes[3], 100)
    grid_x, grid_y = np.meshgrid(xs, ys)
    points = np.c_[grid_x.ravel(), grid_y.ravel()]
    labels = clf.predict(points).reshape(grid_x.shape)
    scores = clf.decision_function(points).reshape(grid_x.shape)
    # hard class regions first, then the soft decision-function shading on top
    plt.contourf(grid_x, grid_y, labels, cmap=plt.cm.brg, alpha=0.2)
    plt.contourf(grid_x, grid_y, scores, cmap=plt.cm.brg, alpha=0.1)
# polynomial-feature SVM: decision regions plus the raw data
plot_predictions(polynomial_svm_clf, [-1.5, 2.5, -1, 1.5])
plot_dataset(X, y, [-1.5, 2.5, -1, 1.5])
plt.show()

# RBF-kernel SVMs over a grid of (gamma, C) hyperparameters
from sklearn.svm import SVC

gamma1, gamma2 = 0.1, 5
C1, C2 = 0.001, 1000
# all four combinations: the original listed only (gamma1, C1) and
# (gamma1, C2), leaving gamma2 unused and half of the 2x2 subplot grid empty
hyperparams = (gamma1, C1), (gamma1, C2), (gamma2, C1), (gamma2, C2)

svm_clfs = []
for gamma, C in hyperparams:
    rbf_kernel_svm_clf = Pipeline([
        ("scaler", StandardScaler()),
        ("svm_clf", SVC(kernel="rbf", gamma=gamma, C=C))
    ])
    rbf_kernel_svm_clf.fit(X, y)
    svm_clfs.append(rbf_kernel_svm_clf)

# one subplot per (gamma, C) combination in a 2x2 grid
plt.figure(figsize=(11, 7))
for i, svm_clf in enumerate(svm_clfs):
    plt.subplot(221 + i)
    plot_predictions(svm_clf, [-1.5, 2.5, -1, 1.5])
    plot_dataset(X, y, [-1.5, 2.5, -1, 1.5])
    gamma, C = hyperparams[i]
    plt.title(r"$\gamma = {}, C = {}$".format(gamma, C), fontsize=16)
plt.tight_layout()
plt.show()
# Notes: advantages of decision trees (bare prose in the original file was a
# SyntaxError in a .py module; converted to comments)
# 1、决策树易于理解和解释,可以可视化分析,容易提取出规则。
# 2、可以同时处理标称型和数值型数据。
# 3、测试数据集时,运行速度比较快。
# 4、决策树可以很好的扩展到大型数据库中,同时它的大小独立于数据库大小。