input_str = """
There are some people who think love is sex
And marriage
And six o'clock-kisses
And children,
And perhaps it is,
Miss Lester.
But do you know what I think?
I think love is a touch and yet not a touch
"""
# Convert the whole string to lowercase
input_str = input_str.lower()
print(input_str)
import re
# Remove every run of digits from the string
input_str = 'Hello Python123 666 Hi jupyter notebook 1111'
result = re.sub(r'\d+', '', input_str)
print(result)
The result: all the digits are stripped out. In some cases, though, we want the opposite. In scraped data, a job posting may list the salary as "15K", or a product page may note that "8500+ people bought this item"; there we need to extract the numbers instead.
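As a minimal sketch (the sample strings below are made up for illustration), re.findall with the same \d+ pattern pulls the numbers out rather than deleting them:

import re

# Hypothetical example strings, following the cases described above
job_info = 'Salary: 15K'
purchase_info = '8500+ people bought this item'

print(re.findall(r'\d+', job_info))       # ['15']
print(re.findall(r'\d+', purchase_info))  # ['8500']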
# Download the stopword data from GitHub: https://github.com/zhousishuo/stopwords
import jieba
import re

# Read the text data used for testing (user comments)
with open('comments.txt', encoding='utf-8') as f:
    data = f.read()

# Preprocess the text: strip useless characters and keep only the Chinese text
new_data = re.findall('[\u4e00-\u9fa5]+', data, re.S)
new_data = "/".join(new_data)

# Segment the text with jieba in accurate mode
seg_list_exact = jieba.cut(new_data, cut_all=False)

# Load the stopword data
with open('stop_words.txt', encoding='utf-8') as f:
    # Read the stopword on each line and add it to a set
    con = f.read().split('\n')
    stop_words = set()
    for i in con:
        stop_words.add(i)

# List comprehension: drop stopwords and single-character words
result_list = [word for word in seg_list_exact if word not in stop_words and len(word) > 1]
result_list