learning-limitless / DuEL-transformers

use_simple.py 3.06 KB
import torch
from torchcrf import CRF
import numpy as np
def use_crf():
    num_tags = 5  # number of tags is 5
    model = CRF(num_tags)
    seq_length = 3  # maximum sequence length in a batch
    batch_size = 2  # number of samples in the batch
    emissions = torch.randn(seq_length, batch_size, num_tags)
    print(emissions)
    tags = torch.tensor([[0, 1], [2, 4], [3, 1]], dtype=torch.long)  # (seq_length, batch_size)
    out = model(emissions, tags)
    print(out)
    # mask size is (seq_length, batch_size); columns are samples,
    # so the second sample has length 2 (its last timestep is masked out)
    mask = torch.tensor([[1, 1], [1, 1], [1, 0]], dtype=torch.uint8)
    out = model(emissions, tags, mask=mask)
    print(out)
    decoded_out = model.decode(emissions)
    print(decoded_out)
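    # Extra sketch (not in the original file): pytorch-crf's decode() also accepts
    # the same mask, so masked-out timesteps are excluded from Viterbi decoding.
    decoded_out_masked = model.decode(emissions, mask=mask)
    print(decoded_out_masked)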
def use_seqeval():
    from seqeval.metrics import classification_report
    y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
    y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
    print(classification_report(y_true, y_pred))
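    # Extra sketch (not in the original file): seqeval also exposes entity-level
    # f1/precision/recall helpers, which score whole entity spans rather than tokens.
    from seqeval.metrics import f1_score, precision_score, recall_score
    print(f1_score(y_true, y_pred), precision_score(y_true, y_pred), recall_score(y_true, y_pred))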
def use_cross_entropy_loss():
    from torch.nn.modules.loss import CrossEntropyLoss
    loss_fct = CrossEntropyLoss()
    type_loss = loss_fct(torch.tensor([[0.3, 0.6], [0.9, 0.1]]), torch.tensor([1, 0]))
    print(type_loss)
def use_yield_a():
    for i in range(10):
        yield i
def use_yield_b():
    yield_a = use_yield_a()
    for j in yield_a:
        yield j
def use_yield_c():
    for k in use_yield_b():
        print(k)
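# Extra sketch (not in the original file): `yield from` delegates to a sub-generator
# and replaces the explicit re-yield loop in use_yield_b.
def use_yield_b_delegated():
    yield from use_yield_a()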
def use_fct():
    from torch.nn import CrossEntropyLoss
    loss_fct = CrossEntropyLoss()
    type_probs = torch.tensor([[0.8, 0.2], [0.3, 0.7], [0.7, 0.3]])
    # type_probs = torch.tensor([[0.8,0.3,0.7], [0.2,0.7,0.3]])
    types = torch.tensor([[0], [1], [0]])
    type_loss = loss_fct(type_probs, types.view(-1))
    print(type_loss)
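    # Verification sketch (assumption: CrossEntropyLoss treats its input as raw logits,
    # i.e. log_softmax followed by NLL loss reproduces the same value).
    import torch.nn.functional as F
    manual_loss = F.nll_loss(F.log_softmax(type_probs, dim=-1), types.view(-1))
    print(manual_loss)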
def use_sigmoid():
    x = torch.tensor([[0.8, 0.2], [0.3, 0.7], [0.7, 0.3]])
    y = torch.sigmoid(x)
    print(y.shape)
    print(y)
def use_bce():
    from torch.nn import BCEWithLogitsLoss
    logits = torch.tensor([[0.8], [0.3], [0.7]])
    labels = torch.tensor([[1], [0], [1]])
    loss = BCEWithLogitsLoss()(logits, labels.float())
    print(loss)
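    # Verification sketch (assumption: BCEWithLogitsLoss equals sigmoid followed by BCELoss,
    # the equivalent but less numerically stable formulation).
    from torch.nn import BCELoss
    manual_loss = BCELoss()(torch.sigmoid(logits), labels.float())
    print(manual_loss)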
def use_mr():
    from torch.nn import MarginRankingLoss
    rank_loss = MarginRankingLoss()
    l_score = torch.tensor([[10.0], [0.3], [0.7]])
    r_score = torch.tensor([[0.2], [0.6], [0.4]])
    # correct labels should give a lower loss
    label = torch.tensor([[1], [0], [1]])
    print(rank_loss(l_score, r_score, 2 * label - 1))
    # incorrect labels should give a higher loss
    label = torch.tensor([[0], [1], [0]])
    print(rank_loss(l_score, r_score, 2 * label - 1))
    # treat the sign of the score difference as the predicted label and compare it
    # against the (incorrect) labels above; all three disagree, so the result is 0.0
    score_diff = (l_score - r_score).numpy()
    label = np.array([0, 1, 0])
    print(score_diff.mean(axis=-1))
    print((score_diff.mean(axis=-1) > 0) == label)
    label_precision = ((score_diff.mean(axis=-1) > 0) == label).astype(float).mean()
    print(label_precision)
if __name__ == '__main__':
    use_mr()
    # use_bce()
    # use_sigmoid()
    # use_fct()
    # use_yield_c()
    # use_seqeval()
    # use_crf()