# Word2vec进行中文情感分析 (Chinese sentiment analysis with Word2vec)

'''
Chinese sentiment analysis
'''

from sklearn.cross_validation import train_test_split
from gensim.models.word2vec import Word2Vec
import numpy as np
import pandas as pd
import jieba
from sklearn.externals import joblib    #把数据转化为二进制
from sklearn.svm import SVC
import sys

'''
Data preprocessing:
    - load the data
    - preprocess (tokenize with jieba)
    - split into training and test sets
'''
def load_file_and_processing():
    """Load positive/negative review corpora, tokenize with jieba, and
    split into train/test sets.

    Returns:
        x_train, x_test: arrays of token lists (one list of words per review)
        y_train, y_test: label arrays (1.0 = positive, 0.0 = negative)
    """
    # header=None: the xls files have no header row, so the single text
    # column must stay addressable as column 0 — with the default header
    # the first review would be consumed as a column name and pos[0]/neg[0]
    # would raise a KeyError (the original inline comment noted this bug).
    neg = pd.read_excel('H:/word2vect_3data/Chinese_data/neg.xls', header=None)
    pos = pd.read_excel('H:/word2vect_3data/Chinese_data/pos.xls', header=None)

    cw = lambda x: list(jieba.cut(x))           # jieba word segmentation
    pos['words'] = pos[0].apply(cw)
    neg['words'] = neg[0].apply(cw)

    # use 1 for positive sentiment, 0 for negative
    y = np.concatenate((np.ones(len(pos)), np.zeros(len(neg))))

    # NOTE(review): the source was truncated here; the split/return below
    # reconstructs the standard remainder of this pipeline step.
    x_train, x_test, y_train, y_test = train_test_split(
        np.concatenate((pos['words'], neg['words'])), y, test_size=0.2)
    return x_train, x_test, y_train, y_test

# 你可能感兴趣的:(NLP)  [scraped page footer, kept as a comment so the file parses]