Text Data Augmentation
Data Processing
Data Sampling
Oversampling and undersampling (note: collecting and annotating data is itself expensive, so use undersampling as sparingly as possible).
EDA (Easy Data Augmentation)
When using EDA, consider the task at hand: some EDA operations change the semantics, so make sure the meaning of the text is preserved.
- Synonym replacement: randomly pick non-stopwords from the sentence and replace them with randomly chosen synonyms.
- Random insertion: randomly pick a word in the sentence that is not a stopword, choose one of its synonyms at random, and insert that synonym at a random position in the sentence; repeat this operation.
- Random swap: randomly pick two words in the sentence and swap their positions; this can easily change the semantics.
- Random deletion: randomly delete words from the sentence.
Back-translation
Various translation models
First, with a translation model you can use strategies such as random sampling or beam search to multiply the amount of data several times over. With a translation service such as Google Translate, you can also obtain an N-fold expansion by switching the intermediate language.
Second, current translation models handle long inputs poorly, so in practice the text is usually split into sentences on punctuation such as "。", each sentence is back-translated separately, and the results are then reassembled into a new text.
- Google Translate API / TextBlob (requires a proxy/VPN to access)
- Baidu Translate API
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 22 14:35:43 2019
@author: dabing
Back-translate text through translation APIs to augment the amount of training data.
"""
import hashlib
import urllib.parse
import urllib.request
import json
from translate import Translator
import jieba
import synonyms
import random
import sys
import os
import time


class convertText(object):
    def __init__(self, fromLangByBaidu, toLangByBaidu, fromLangByMicrosoft, toLangByMicrosoft):
        self.appid = ''          # fill in your Baidu appid
        self.secretKey = ''      # fill in your Baidu secret key
        self.url_baidu_api = 'http://api.fanyi.baidu.com/api/trans/vip/translate'  # Baidu general translation API
        self.fromLang = fromLangByBaidu
        self.toLang = toLangByBaidu
        self.fromLangByMicrosoft = fromLangByMicrosoft
        self.toLangByMicrosoft = toLangByMicrosoft
        self.stop_words = self.load_stop_word(os.path.join(sys.path[0], 'stop_words.txt'))

    def _translateFromBaidu(self, text, fromLang, toLang):
        salt = random.randint(32768, 65536)                    # random salt
        sign = self.appid + text + str(salt) + self.secretKey  # signature: appid + text + salt + secret key
        sign = hashlib.md5(sign.encode()).hexdigest()          # MD5 of the signature
        url_baidu = (self.url_baidu_api + '?appid=' + self.appid + '&q=' + urllib.parse.quote(text)
                     + '&from=' + fromLang + '&to=' + toLang + '&salt=' + str(salt) + '&sign=' + sign)
        # sleep 1s to respect the API rate limit
        # time.sleep(1)
        try:
            response = urllib.request.urlopen(url_baidu, timeout=30)
            content = response.read().decode("utf-8")
            data = json.loads(content)
            if 'error_code' in data:
                print('error code: {0}, {1}'.format(data['error_code'], data['error_msg']))
                return 'error'
            else:
                return str(data['trans_result'][0]['dst'])
        except urllib.error.HTTPError as error:   # HTTPError is a subclass of URLError, so catch it first
            print(error)
            return 'error'
        except urllib.error.URLError as error:
            print(error)
            return 'error'

    # ------- test _translateFromBaidu -------
    # text = '我很喜欢这部电影!你呢?'
    # print(translateFromBaidu(text, fromLang, toLang))

    # back-translation with the Baidu translation API: chinese -> english -> chinese
    def convertFromBaidu(self, text):
        # print(self.fromLang, self.toLang)
        translation1 = self._translateFromBaidu(text, self.fromLang, self.toLang)
        # translation1 = self._translateFromBaidu(text, 'zh', 'en')
        if translation1 == 'error':
            return 'error'
        print('1 is over')
        translation2 = self._translateFromBaidu(translation1, self.toLang, self.fromLang)
        if translation2 == 'error':
            return 'error'
        print('2 is over')
        # print(translation1, translation2, text)
        if translation2 != text:
            return translation2
        return 'same'

    # back-translation with the Microsoft translation API: chinese -> english -> chinese
    def convertFromMicrosoft(self, text):
        translator1 = Translator(from_lang=self.fromLangByMicrosoft, to_lang=self.toLangByMicrosoft)
        translation1 = translator1.translate(text)
        translator2 = Translator(from_lang=self.toLangByMicrosoft, to_lang=self.fromLangByMicrosoft)
        translation2 = translator2.translate(translation1)
        if translation2 != text:
            return translation2
        return 'same'

    def edaReplacement(self, text, stop_words, replace_num):
        # synonyms is a Chinese synonym toolkit usable for many NLU tasks: text alignment, recommendation,
        # similarity computation, semantic shift, keyword/concept extraction, summarization, search, etc.
        '''
        Synonym replacement
        '''
        new_words = text.copy()
        random_word_list = list(set([word for word in text if word not in stop_words]))
        random.shuffle(random_word_list)
        num_replaced = 0
        for random_word in random_word_list:
            synonym_list = synonyms.nearby(random_word)[0]   # nearby returns [[synonyms], [similarity scores]]
            if len(synonym_list) >= 1:
                synonym = random.choice(synonym_list)        # pick one synonym at random
                new_words = [synonym if word == random_word else word for word in new_words]
                num_replaced += 1
            if num_replaced >= replace_num:
                break
        sentence = ' '.join(new_words)
        sentence = sentence.strip()
        new_words = sentence.split(' ')
        return new_words                                     # list of words after replacement

    def _add_words(self, new_words):
        synonym = []
        count = 0
        while len(synonym) < 1:
            random_word = new_words[random.randint(0, len(new_words) - 1)]
            synonym = synonyms.nearby(random_word)[0]
            count += 1
            # give up if no synonym is found after 10 tries
            if count >= 10:
                return
        random_synonym = random.choice(synonym)
        random_index = random.randint(0, len(new_words) - 1)
        new_words.insert(random_index, random_synonym)

    def edaRandomInsert(self, text, insert_num):
        '''
        Random insertion
        '''
        new_words = text.copy()
        for num in range(insert_num):
            self._add_words(new_words)
        return new_words

    def _swap_word(self, new_words):
        random_idx_1 = random.randint(0, len(new_words) - 1)
        random_idx_2 = random_idx_1
        counter = 0
        while random_idx_2 == random_idx_1:
            random_idx_2 = random.randint(0, len(new_words) - 1)
            counter += 1
            if counter > 3:
                return new_words
        new_words[random_idx_1], new_words[random_idx_2] = new_words[random_idx_2], new_words[random_idx_1]
        return new_words

    def edaRandomSwap(self, text, swap_num):
        '''
        Random swap
        '''
        new_words = text.copy()
        for index in range(swap_num):
            new_words = self._swap_word(new_words)
        return new_words

    def edaRandomDelete(self, text, p):
        '''
        Random deletion: drop each word with probability p
        '''
        if len(text) == 1:
            return text
        new_words = []
        for word in text:
            r = random.uniform(0, 1)
            if r > p:
                new_words.append(word)
        # if everything was deleted, keep one random word
        if len(new_words) == 0:
            rand_int = random.randint(0, len(text) - 1)
            return [text[rand_int]]
        return new_words

    def load_stop_word(self, path):
        stop_words = []
        with open(path, 'r', encoding='utf-8') as file:
            for line in file.readlines():
                stop_words.append(line.strip())   # strip the trailing newline
        return stop_words

    def eda(self, text, aug_num, replace_rate, add_rate, swap_rate, delete_rate):
        '''
        Each EDA operation is applied once by default, producing 4 augmented texts.
        '''
        segment_words = jieba.lcut(text)
        num_words = len(segment_words)
        # stop_words_path = os.path.join(sys.path[0], 'stop_words.txt')
        # stop_words = self.load_stop_word(stop_words_path)
        stop_words = self.stop_words
        replace_num = max(1, int(replace_rate * num_words))
        swap_num = max(1, int(swap_rate * num_words))
        add_num = max(1, int(add_rate * num_words))
        text_augment = []
        text_replace = ''.join(self.edaReplacement(segment_words, stop_words, replace_num))
        text_add = ''.join(self.edaRandomInsert(segment_words, add_num))
        text_swap = ''.join(self.edaRandomSwap(segment_words, swap_num))
        text_delete = ''.join(self.edaRandomDelete(segment_words, delete_rate))
        text_augment.append(text_replace)
        text_augment.append(text_add)
        text_augment.append(text_swap)
        text_augment.append(text_delete)
        return text_augment

    # def eda_convert(self, data_list):


# text = '电影评价,窗前明月光,我很喜十八你欢这部电影!你和啊哈哈呢,我的宝贝?'
# conText = convertText(fromLangByBaidu='zh', toLangByBaidu='en', fromLangByMicrosoft='chinese', toLangByMicrosoft='english')
#
# print(conText.convertFromBaidu(text))
# print(conText.convertFromMicrosoft(text))
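To follow the sentence-splitting strategy described above for long texts, here is a minimal sketch (the helper back_translate_long_text is not part of the original script and is only illustrative): it splits a long text on "。", back-translates each sentence with the convertText class above, and reassembles the result.

def back_translate_long_text(converter, text):
    # split into sentences on the Chinese full stop, back-translate each one,
    # and reassemble the results into a single augmented text
    sentences = [s for s in text.split('。') if s.strip()]
    translated = []
    for sentence in sentences:
        result = converter.convertFromBaidu(sentence)
        # keep the original sentence when the API fails or returns identical text
        if result in ('error', 'same'):
            translated.append(sentence)
        else:
            translated.append(result)
    return '。'.join(translated) + '。'

# conText = convertText(fromLangByBaidu='zh', toLangByBaidu='en',
#                       fromLangByMicrosoft='chinese', toLangByMicrosoft='english')
# print(back_translate_long_text(conText, long_text))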
Generative models
Seq2seq tasks
Generative adversarial networks
Training tricks
Setting class weights
During training, assign weights directly in the loss function so that the model pays more attention to the minority classes. For example, in a 3-class deep learning task where labels a, b, and c appear in a 1:1:8 ratio, you can set the weights in the cross-entropy loss like this:
torch.nn.CrossEntropyLoss(weight=torch.from_numpy(np.array([8,8,1])).float().to(device))
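For completeness, a minimal sketch of deriving such weights from the label ratio and plugging them into the loss (the 3-class counts and variable names are illustrative, not from the original text):

import numpy as np
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# class frequencies for labels a, b, c (ratio 1:1:8); weight each class by the
# inverse of its relative frequency so the two minority classes get 8x the weight
class_counts = np.array([1, 1, 8], dtype=np.float32)
weights = class_counts.max() / class_counts          # -> [8., 8., 1.]

criterion = torch.nn.CrossEntropyLoss(
    weight=torch.from_numpy(weights).float().to(device))

# inside a training step:
# loss = criterion(logits, labels)   # logits: [batch, 3], labels: [batch]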
Focal loss
Focal loss concentrates the loss contribution on hard, rare samples and down-weights easy samples so that they contribute less to the total loss, which mitigates class imbalance.
The code:
# -*- coding: utf-8 -*-
# @Author : LG
from torch import nn
import torch
from torch.nn import functional as F


class focal_loss(nn.Module):
    '''
    Focal loss: -alpha * (1 - p_t) ** gamma * ce_loss
    '''
    def __init__(self, alpha=0.25, gamma=2, num_classes=3, size_average=True):
        """
        Step-by-step implementation of the focal loss, -α(1-yi)**γ * ce_loss(xi, yi).
        :param alpha: class weight α. A list gives per-class weights; a scalar yields the weights
                      [α, 1-α, 1-α, ...], commonly used in object detection to suppress the
                      background class (0.25 in RetinaNet).
        :param gamma: focusing parameter γ that modulates easy vs. hard samples (2 in RetinaNet).
        :param num_classes: number of classes.
        :param size_average: if True return the mean loss, otherwise the sum.
        """
        super(focal_loss, self).__init__()
        self.size_average = size_average
        if isinstance(alpha, list):
            assert len(alpha) == num_classes   # a list of size [num_classes] assigns fine-grained per-class weights
            print("Focal_loss alpha = {}, using per-class weights".format(alpha))
            self.alpha = torch.Tensor(alpha)
        else:
            assert alpha < 1                   # a scalar α down-weights the first class (the background class in detection)
            print(" --- Focal_loss alpha = {}, decaying the background class; intended for detection tasks --- ".format(alpha))
            self.alpha = torch.zeros(num_classes)
            self.alpha[0] += alpha
            self.alpha[1:] += (1 - alpha)      # α becomes [α, 1-α, 1-α, ...], size [num_classes]
        self.gamma = gamma

    def forward(self, preds, labels):
        """
        Compute the focal loss.
        :param preds: predictions, size [B, N, C] (detection) or [B, C] (classification);
                      B = batch size, N = number of boxes, C = number of classes.
        :param labels: ground-truth classes, size [B, N] or [B].
        :return: scalar loss
        """
        # assert preds.dim() == 2 and labels.dim() == 1
        preds = preds.view(-1, preds.size(-1))
        alpha = self.alpha.to(preds.device)
        preds_softmax = F.softmax(preds, dim=1)    # softmax rather than log_softmax, because its value is reused below
        preds_logsoft = torch.log(preds_softmax)
        preds_softmax = preds_softmax.gather(1, labels.view(-1, 1))   # this implements nll_loss (cross entropy = log_softmax + nll)
        preds_logsoft = preds_logsoft.gather(1, labels.view(-1, 1))
        alpha = alpha.gather(0, labels.view(-1))   # gather into a local tensor so self.alpha is not overwritten between batches
        loss = -torch.mul(torch.pow((1 - preds_softmax), self.gamma), preds_logsoft)   # torch.pow((1 - preds_softmax), self.gamma) is the (1 - p_t)**γ factor
        loss = torch.mul(alpha, loss.t())
        if self.size_average:
            loss = loss.mean()
        else:
            loss = loss.sum()
        return loss
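A short usage sketch, continuing from the class above (the per-class alpha list mirrors the 1:1:8 example and is illustrative only):

# use the focal loss in place of CrossEntropyLoss; higher alpha on the minority classes
criterion = focal_loss(alpha=[8, 8, 1], gamma=2, num_classes=3, size_average=True)

logits = torch.randn(4, 3, requires_grad=True)   # [batch, num_classes]
labels = torch.tensor([0, 2, 1, 2])              # [batch]
loss = criterion(logits, labels)
loss.backward()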
Classification threshold optimization
To squeeze extra score out of a competition, the class decision boundaries themselves can be optimized. The simplest options are a brute-force / Monte Carlo search or scipy's optimize routines; one top competitor in a Kaggle Q&A competition used a genetic algorithm.
# Sentiment classification
from functools import partial
import numpy as np
from scipy import optimize
from sklearn import metrics


class OptimizedRounder(object):
    def __init__(self):
        self.coef_ = 0

    def _f1_loss(self, coef, X, y):
        # scale each class's logit by its coefficient, then take the argmax
        X_p = np.copy(X)
        for i in range(len(X_p)):
            X_p[i] = np.multiply(coef, X_p[i])
        w_logits = np.argmax(X_p, axis=1)
        ll = metrics.f1_score(y, w_logits, average='macro')
        return -ll   # minimize the negative macro F1

    def fit(self, X, y):
        loss_partial = partial(self._f1_loss, X=X, y=y)
        initial_coef = [1, 1, 1]   # one coefficient per class (3-class task here)
        self.coef_ = optimize.minimize(loss_partial, initial_coef, method='Powell')

    def coefficients(self):
        return self.coef_['x']


def best_w(logits, labels):
    optim = OptimizedRounder()
    optim.fit(logits, labels)
    best_weight = optim.coefficients()
    return best_weight
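A hedged usage sketch (the validation/test arrays below are random placeholders): fit the coefficients on validation logits, then rescale the test logits with them before taking the argmax.

# val_logits: [n_val, 3] model outputs on the validation set, val_labels: [n_val]
val_logits = np.random.rand(100, 3)
val_labels = np.random.randint(0, 3, size=100)

coef = best_w(val_logits, val_labels)                # per-class scaling coefficients

test_logits = np.random.rand(50, 3)
test_pred = np.argmax(test_logits * coef, axis=1)    # apply the tuned thresholds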