java中的接口是类吗
241
2022-09-06
python文本分类(python文本分类模型代码)
前面博客里面从谣言百科中爬取到了所有类别(10类)的新闻并以文本的形式存储。
现在对这些数据进行分类,代码如下:
# -*- coding: utf-8 -*-
"""
Chinese text classification preprocessing.

Loads per-category text files (one sub-folder per class), tokenizes them
with jieba, splits train/test, and selects frequency-ranked feature words
for a (Multinomial) Naive Bayes classifier.

Created on Fri Mar 9 14:18:49 2018
@author: Administrator
"""
import os
import time
import random
import jieba
import nltk
import sklearn
from sklearn.naive_bayes import MultinomialNB
import numpy as np
import pylab as pl
import matplotlib.pyplot as plt


def MakeWordsSet(words_file):
    """Read one word per line from *words_file* and return them as a set.

    Blank lines are skipped and duplicates are dropped.
    """
    words_set = set()
    with open(words_file, 'r', encoding='UTF-8') as fp:
        for line in fp:
            word = line.strip()
            if len(word) > 0 and word not in words_set:  # skip blanks, de-duplicate
                words_set.add(word)
    return words_set


def TextProcessing(folder_path, test_size=0.2):
    """Tokenize every text file under *folder_path* and split train/test.

    *folder_path* must contain one sub-folder per class; each sub-folder
    holds UTF-8 text files belonging to that class.

    Returns a 5-tuple:
        (all_words_list, train_data_list, test_data_list,
         train_class_list, test_class_list)
    where all_words_list is the training-set vocabulary sorted by
    descending frequency.
    """
    folder_list = os.listdir(folder_path)  # one sub-folder per class
    data_list = []   # tokenized documents (list of word lists)
    class_list = []  # class label (folder name) for each document

    for folder in folder_list:
        new_folder_path = os.path.join(folder_path, folder)
        files = os.listdir(new_folder_path)
        j = 0
        for file in files:
            if j > 410:  # cap on the number of samples taken per class
                break
            with open(os.path.join(new_folder_path, file), 'r', encoding='UTF-8') as fp:
                raw = fp.read()
            # jieba precise mode returns a generator; materialize to a list
            word_cut = jieba.cut(raw, cut_all=False)
            word_list = list(word_cut)
            data_list.append(word_list)
            class_list.append(folder)
            j += 1

    # Shuffle, then split: the first test_size fraction (+1 document) is
    # the test set, the remainder is the training set.
    data_class_list = list(zip(data_list, class_list))
    random.shuffle(data_class_list)
    index = int(len(data_class_list) * test_size) + 1
    train_list = data_class_list[index:]
    test_list = data_class_list[:index]
    train_data_list, train_class_list = zip(*train_list)
    test_data_list, test_class_list = zip(*test_list)

    # Word frequencies computed over the TRAINING documents only.
    all_words_dict = {}
    for word_list in train_data_list:
        for word in word_list:
            if word in all_words_dict:
                all_words_dict[word] += 1
            else:
                all_words_dict[word] = 1

    # Sort descending by frequency, then keep just the words.
    all_words_tuple_list = sorted(all_words_dict.items(), key=lambda f: f[1], reverse=True)
    all_words_list = list(zip(*all_words_tuple_list))[0]

    return all_words_list, train_data_list, test_data_list, train_class_list, test_class_list


def words_dict(all_words_list, deleteN, stopwords_set=set()):
    """Select up to 1000 feature words, skipping the deleteN most frequent.

    A word qualifies when it is not purely digits, is not a stop word, and
    its length is strictly between 1 and 5 characters.

    NOTE(review): the length condition was truncated in the scraped source
    ("... and 1"); "1 < len(word) < 5" is reconstructed from the standard
    version of this tutorial code — confirm against the original post.
    (stopwords_set default is a shared mutable, but it is only read, never
    mutated, so this is safe here.)
    """
    feature_words = []
    n = 1
    for t in range(deleteN, len(all_words_list)):
        if n > 1000:  # cap feature dimensionality at 1000
            break
        word = all_words_list[t]
        if not word.isdigit() and word not in stopwords_set and 1 < len(word) < 5:
            feature_words.append(word)
            n += 1
    return feature_words

# (article text after the code: "运行完分类完成!" — run it and the
# classification is done. The rest of the script — feature extraction,
# classifier training, plotting — was lost in the scrape.)
版权声明:本文内容由网络用户投稿,版权归原作者所有,本站不拥有其著作权,亦不承担相应法律责任。如果您发现本站中有涉嫌抄袭或描述失实的内容,请联系我们jiasou666@gmail.com 处理,核实后本网站将在24小时内删除侵权内容。
发表评论
暂时没有评论,来抢沙发吧~