test_neural_network_vedio_221111.py
import cv2
import time
import numpy as np
import torch
import torchvision
import onnxruntime
import copy
import os
from multiprocessing import Process
from multiprocessing import Queue as mQ
def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y

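# A minimal usage sketch (hypothetical values): converting one center-format box
# to corner format.
#   >>> xywh2xyxy(np.array([[100., 100., 40., 20.]]))
#   array([[ 80.,  90., 120., 110.]])
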
def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """
    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)
    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)

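# Sketch of the pairwise semantics (hypothetical boxes): identical boxes give
# IoU 1.0, disjoint boxes give 0.0.
#   >>> a = torch.tensor([[0., 0., 10., 10.]])
#   >>> b = torch.tensor([[0., 0., 10., 10.], [20., 20., 30., 30.]])
#   >>> box_iou(a, b)
#   tensor([[1., 0.]])  # shape (N, M) = (1, 2)
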
def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
                        labels=(), max_det=300):
    """Runs Non-Maximum Suppression (NMS) on inference results
    Returns:
        list of detections, one (n,6) tensor per image [xyxy, conf, cls]
    """
    nc = prediction.shape[2] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Checks
    assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
    assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 5), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        elif n > max_nms:  # excess boxes
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            print(f'WARNING: NMS time limit {time_limit}s exceeded')
            break  # time limit exceeded

    return output

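# Hedged usage note: `prediction` is expected to be a raw YOLOv5-style head output
# of shape (batch, num_boxes, 5 + num_classes) in xywh format; the returned list
# holds one (n, 6) tensor per image laid out as [x1, y1, x2, y2, conf, cls], e.g.:
#   >>> dets = non_max_suppression(torch.tensor(pred), conf_thres=0.25, iou_thres=0.45)
#   >>> for det in dets:              # one tensor per image in the batch
#   ...     for *xyxy, conf, cls in det:
#   ...         ...
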
def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2

def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]
    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords

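# A worked example of the mapping (assumed shapes): boxes predicted on a 640x640
# letterboxed input are shifted by the padding and divided by the resize gain to
# land back on the original frame.
#   >>> # 640x640 network input, 1280x720 original: gain = 0.5, pad = (0, 140)
#   >>> coords = torch.tensor([[100., 140., 420., 460.]])
#   >>> scale_coords((640, 640), coords, (720, 1280))
#   tensor([[200.,   0., 840., 640.]])
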
def plot_image(img, xyxy_list, text):
    for xyxy in xyxy_list:
        cv2.rectangle(img, (int(xyxy[0]), int(xyxy[1])), (int(xyxy[2]), int(xyxy[3])), (0, 0, 255), thickness=3)
    cv2.putText(img, text, (10, 10), cv2.FONT_HERSHEY_COMPLEX, 2.0, (0, 0, 255))

# new_shape=(640, 640)
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    # Resize and pad image while meeting stride-multiple constraints
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        # new_unpad = (new_shape[1], new_shape[1])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2
    # new_unpad = (640, 640)
    print('new_unpad', new_unpad)
    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    print('dw, dh', dw, dh)
    return img, ratio, (dw, dh)

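# A minimal sketch of what letterbox returns (assumed 1280x720 BGR frame): with
# auto=True the padded side is only rounded up to the next multiple of `stride`,
# so the output is not necessarily square.
#   >>> frame = np.zeros((720, 1280, 3), dtype=np.uint8)
#   >>> padded, ratio, (dw, dh) = letterbox(frame, new_shape=(640, 640))
#   >>> padded.shape   # 720 * 0.5 = 360 -> padded up to 384 (next multiple of 32)
#   (384, 640, 3)
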
def fill_img(img):
    # Pad the image (bottom/right) up to the nearest standard resolution that fits it.
    print('2333begin', img.shape)
    width_list = [1024, 1280, 1366, 1920, 2560, 3840]
    height_list = [576, 720, 768, 1080, 1440, 2160]
    height, width = img.shape[:2]
    min_width_difference = 10000
    min_height_difference = 10000
    width_index = 0
    height_index = 0
    for i, width_n in enumerate(width_list):
        if width_n - width >= 0 and min_width_difference > width_n - width:
            width_index = i
            min_width_difference = width_n - width
    for i, height_n in enumerate(height_list):
        if height_n - height >= 0 and min_height_difference > height_n - height:
            height_index = i
            min_height_difference = height_n - height
    index = max(width_index, height_index)
    print('23333', index, height_list[index] - height, width_list[index] - width)
    img = cv2.copyMakeBorder(img, 0, height_list[index] - height, 0, width_list[index] - width, cv2.BORDER_CONSTANT)
    print('2333end', img.shape)
    return img

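# Sketch of the padding behaviour (hypothetical input): a 700x1000 frame takes the
# larger of the two list indices that fit its sides, so both dimensions come from
# the same resolution pair.
#   >>> fill_img(np.zeros((700, 1000, 3), dtype=np.uint8)).shape
#   (720, 1280, 3)  # width fits 1024 (index 0), height needs 720 (index 1) -> pair 1
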
def load_models(ort_session, queue1, index, txt_path):
    classification_names = ['person', 'head', 'helmet', 'traffic', 'truck', 'car', 'bus', 'pickupTruck', 'slagcar',
                            'slagcar_overload', 'safetyBelt', 'fire']
    kk = 0
    # path_img = r'../test_data/test_image221026'
    # new_path_img = r'../test_data/test_image221026_new'
    path_img = r'../test_data/test_image230130'
    new_path_img = r'../test_data/test_image230130_new'
    if not os.path.exists('../test_data'):
        os.mkdir('../test_data')
    if not os.path.exists(path_img):
        os.mkdir(path_img)
    if not os.path.exists(new_path_img):
        os.mkdir(new_path_img)
    print('start load models...........................')
    img_ori = []
    img_pro = []
    shape_list = []
    ori_img = []
    c = 0
    max_queue1_qsize = 1  # 16
    print(f"Start analysing batch {index} _______________________________________________________________")
    while c < max_queue1_qsize:
        try:
            print(f"300 queue size {queue1.qsize()} ····································")
            image = queue1.get()
            print(f"Fetched image {c} ·······································")
            ori_img.append(image)
            frame = fill_img(image.copy())
            # frame = image
            # frame = cv2.resize(frame, (640, 640))
            img_ori.append(frame)
            img = frame[:, :, ::-1]  # BGR to RGB
            img = letterbox(img)[0]
            print('img.shape', img.shape)
            img = img.transpose(2, 0, 1)  # HWC to CHW
            img = img.astype(np.float32)
            img /= 255.
            img_pro.append(img)
            c += 1
            print('c ·······································', c)
        except Exception as e:
            print('304err', e)
    img_pro = np.stack(img_pro, 0)
    print('316img_pro')
    pred = ort_session.run(None, {ort_session.get_inputs()[0].name: img_pro})[0]
    print('318pred')
    pred = non_max_suppression(torch.tensor(pred), 0.25, 0.45, classes=None, agnostic=False)
    # # Load model
    # # device = select_device(device)
    # device = 'cpu'
    # model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
    # stride, names, pt = model.stride, model.names, model.pt
    # imgsz = check_img_size(imgsz, s=stride)  # check image size
    #
    # # Inference
    # # visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
    # visualize = False
    # augment = False,  # augmented inference
    # pred = model(im, augment=augment, visualize=visualize)
    # # NMS
    # pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
    # ori_img_ori = ori_img.copy()
    print('pred', pred)
    for i, det in enumerate(pred):
        try:
            kk += 1
            print('i, det', i, det)
            len_det = len(det)
            result = ''
            # y = []
            det[:, :4] = scale_coords(img_pro.shape[2:], det[:, :4], img_ori[i].shape).round()
            num = 0
            temp_txt = ''
            cv2.imwrite(new_path_img + '/tp_' + str(index) + '0' + str(kk) + '.jpg', ori_img[i])
            for *xyxy, conf, cls in reversed(det):
                num += 1
                xyxy_list = []
                for x in xyxy:
                    result += '0' * (4 - len(str(int(x)))) + str(int(x))
                result += '0' * (4 - len(str(int(cls)))) + str(int(cls))
                result += f'{conf:.2f}'
                color = (0, 255, 0)
                # cv2.rectangle(img_ori[i], (int(xyxy[0]), int(xyxy[1])), (int(xyxy[2]), int(xyxy[3])), (0, 255, 0), 2)
                print('result', result)
                print('cls', cls, int(cls))
                if int(cls) == classification_names.index('head'):
                    color = (0, 0, 255)
                elif int(cls) == classification_names.index('car') or int(cls) == classification_names.index('pickupTruck'):
                    color = (255, 0, 0)
                # cv2.rectangle(img_ori[i], (int(xyxy[0]), int(xyxy[1])), (int(xyxy[2]), int(xyxy[3])), color, 2)
                cv2.rectangle(ori_img[i], (int(xyxy[0]), int(xyxy[1])), (int(xyxy[2]), int(xyxy[3])), color, 2)
                temp_txt += str(int(cls)) + ' '
                # temp_p =
                temp_txt += str(int(xyxy[0])) + ' ' + str(int(xyxy[1])) + ' ' + str(int(xyxy[2])) + ' ' + str(int(xyxy[3])) + '\n'
                # cropped = img[0:128, 0:512]  # crop coordinates are [y0:y1, x0:x1]
                x1, y1, x2, y2 = xyxy
                print('352xyxy', xyxy)
                print('353x1,y1,x2,y2', x1, y1, x2, y2)
                x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
                print('355x1,y1,x2,y2', x1, y1, x2, y2)
                img_temp = copy.deepcopy(img_ori[i])[y1:y2, x1:x2]
                average_img = np.average(img_temp, axis=(0, 1))
                average_img3 = np.average(average_img)
                print('355average_img', average_img, np.average(average_img))
                img_temp2 = copy.deepcopy(img_ori[i])[y1:y2, x1:x2]
                img_temp2[:, :, 0], img_temp2[:, :, 1], img_temp2[:, :, 2] = average_img
                # cv2.imwrite(path_img + '/' + str(index) + '0' + str(kk) + '_' + str(num) + '.jpg', img_temp2)
                # cv2.imshow('image', img_ori[i])
                # k = cv2.waitKey(1)  # press q to quit
            cv2.imwrite(path_img + '/tp_' + str(index) + '0' + str(kk) + '.jpg', ori_img[i])
            with open(txt_path + '/tp_' + str(index) + '0' + str(kk) + '.txt', 'w') as f:
                print('403temp_txt', temp_txt)
                f.write(temp_txt)
        except Exception as e:
            print('409err', e)
    print(f'Finished testing batch {index * max_queue1_qsize}')

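# Sketch of the label file layout this writes (hypothetical detections): one
# detection per line, "<class_id> <x1> <y1> <x2> <y2>" in original-image pixels:
#   2 104 56 220 310
#   5 400 180 640 360
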
def run_load_models(queue, txt_path):
    # onnx_path = './weigth/best_yolov5m_16.onnx'
    # onnx_path = './weigth/Hel_V3_shujuliuanle_300_no.onnx'
    # onnx_path = './weigth/best2.onnx'
    # onnx_path = 'weigth/Hel_V32_all_9k_height_l_300_ok.onnx'
    # onnx_path = './weigth/yolov5l.onnx'
    # onnx_path = './weigth/yolov5m_640384.onnx'
    # onnx_path = './weigth/yolov5m384640.onnx'
    # onnx_path = './weigth/yolov5m640640.onnx'
    # onnx_path = './weigth/yolov5m384384.onnx'
    # onnx_path = './weigth/yolov5l829.onnx'
    # onnx_path = './weigth/yolov5l829T.onnx'
    # onnx_path = './weigth/yolov5l_ori830.onnx'
    onnx_path = './weigth/Hel_V31_all_9k_height_m_300_1ok.onnx'
    # onnx_path = './weigth/Hel_V31_all_9k_height_m_300_16ok.onnx'
    # CPU
    # ort_session = onnxruntime.InferenceSession(onnx_path)
    ort_session = onnxruntime.InferenceSession(onnx_path, providers=['CPUExecutionProvider'])
    # GPU
    # ort_session = onnxruntime.InferenceSession(onnx_path, providers=['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider'])
    # ort_session = onnxruntime.InferenceSession(onnx_path, providers=['CUDAExecutionProvider'])
    if False:
        import onnx
        # load the model
        model = onnx.load(onnx_path)
        # check that the model format is complete and correct
        onnx.checker.check_model(model)
        # get the output layers (names and shape information)
        output = model.graph.output
        print('375output', output)
    if False:
        # import onnx
        # onnx_model = onnx.load(onnx_path)
        # graph = onnx_model.graph
        import netron
        netron.start(onnx_path)
    # if True:
    #     zjr_session = onnxruntime.InferenceSession(
    #         onnx_path,
    #         providers=onnxruntime.get_available_providers()
    #     )
    index = 0
    while True:
        if queue.qsize() == 0:
            time.sleep(1)
        else:
            print(f"run_load_models queue size {queue.qsize()} ·······································")
            load_models(ort_session, queue, index, txt_path)
            index += 1

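# Hedged note: if a CUDA or TensorRT build of onnxruntime is installed, the
# available providers can be checked before switching the commented-out GPU
# session lines on, e.g.:
#   >>> onnxruntime.get_available_providers()
#   ['CPUExecutionProvider']   # CPU-only build (example output)
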
def get_image(path, queue):
    video_list = os.listdir(path)
    print('video_list', len(video_list), video_list)
    for video in video_list:
        if video.endswith('.mp4'):
            flag = 'mp4'
        elif video.endswith('.jpg') or video.endswith('.png'):
            flag = 'img'
        else:
            flag = False
        if flag == 'mp4':
            v_path = os.path.join(path, video)
            print('347v_path', v_path)
            cap = cv2.VideoCapture(v_path)
            fps = cap.get(cv2.CAP_PROP_FPS)
            print('fps', fps)
            video_fps = 0
            i = 0
            sample_every = max(int(fps * 3), 1)  # sample roughly one frame every 3 seconds
            try:
                ret, frame = cap.read()
                # cap = cv2.VideoCapture("test/test.mp4")
                # while cap.isOpened():
                while ret:
                    ret, frame = cap.read()
                    if not ret:  # last read failed; do not enqueue a None frame
                        break
                    # print('456ret, frame', ret, frame)
                    if i % sample_every == 0:
                        # print('359frame', frame.shape)
                        queue.put(frame)
                        time.sleep(0.1)
                        print(f'Image {i}')
                        print(f"364 queue size after put {queue.qsize()} ·······································")
                        if queue.qsize() > 5:
                            time.sleep(queue.qsize())
                    # else:
                    #     ret, frame = cap.read()
                    i += 1
                    if i % (16 * 50 + 1) == 0:
                        print(i)
                        # time.sleep(1)
            except:
                print('490err')
        elif flag == 'img':
            frame = cv2.imread(os.path.join(path, video))
            queue.put(frame)
            time.sleep(0.1)
            print(f"364 queue size after put {queue.qsize()} ·······································")
            if queue.qsize() > 5:
                time.sleep(queue.qsize())
        else:
            print('561name', video)
    print('finished fetching frames')

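# Hedged note on the sampling cadence (assumed integer frame rate): with fps = 25
# the condition i % sample_every == 0 enqueues roughly one frame every 3 seconds
# of video (frames 0, 75, 150, ...); image files are enqueued once each, and the
# sleep on a full queue throttles the producer against the slower inference process.
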
if __name__ == '__main__':
    # path = r"D:\work_syld\data\syld\test"
    # path = r"./test_Data"
    # path = r'C:\Users\DELL\Desktop\img1'
    txt_path = '../lable'
    if not os.path.exists(txt_path):
        os.mkdir(txt_path)
    path = r'D:\Desktop\img3'
    q1 = mQ()
    p2 = Process(target=get_image, args=(path, q1))
    p2.start()
    p1 = Process(target=run_load_models, args=(q1, txt_path))
    p1.start()
    p1.join()
    p2.join()
    print('detection finished')
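
# Hedged usage sketch (assumed layout): with the ONNX weights under ./weigth/ and
# test videos or images under the `path` directory above, the script is run directly:
#   python test_neural_network_vedio_221111.py
# One process feeds sampled frames into the queue; the other batches them through
# the ONNX session and writes annotated images plus per-image .txt label files.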