The server-side code has two parts: (a) the Flask service and (b) the model-processing class. Drop the related files straight into the YOLOv5 folder, and put the YOLOv5 folder inside your Python project.
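One possible layout is sketched below; the file name flask_server.py for the Flask service is an assumption for illustration, the other names follow the imports used in the code:
python_project/
    yolov5/               # official YOLOv5 repository
        models/
        utils/
        closezxw.pt       # trained weights loaded by myYOLOv5
        myyolov5.py       # model-processing class (part b)
        my2yolov5.py      # second model-processing class
        flask_server.py   # Flask service (part a)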
a. The Flask service:
import time
from flask import Flask, request, jsonify
import cv2
import numpy as np
from myyolov5 import myYOLOv5
from my2yolov5 import my2YOLOv5
app = Flask(__name__)
det = myYOLOv5()
det2 = my2YOLOv5()
@app.route("/close", methods=["POST"])
def predict():
if not request.method == "POST":
return
result = {"success": False}
if request.files.get("image"):
# 得到客户端传输的图像
start = time.time()
input_image = request.files["image"].read()
imBytes = np.frombuffer(input_image, np.uint8)
iImage = cv2.imdecode(imBytes, cv2.IMREAD_COLOR)
# 执行推理
outs = det.infer(iImage)
print("duration: ", time.time() - start)
if (outs is None) and (len(outs) < 0):
result["success"] = False
# 将结果保存为json格式
result["box"] = outs[0].tolist()
result["conf"] = outs[1].tolist()
result["classid"] = outs[2].tolist()
result['success'] = True
return jsonify(result)
@app.route("/wz", methods=["POST"])
def predict2():
if not request.method == "POST":
return
result = {"success": False}
if request.files.get("image"):
# 得到客户端传输的图像
start = time.time()
input_image = request.files["image"].read()
imBytes = np.frombuffer(input_image, np.uint8)
iImage = cv2.imdecode(imBytes, cv2.IMREAD_COLOR)
# 执行推理
outs = det2.infer(iImage)
print("duration: ", time.time() - start)
if (outs is None) and (len(outs) < 0):
result["success"] = False
# 将结果保存为json格式
result["box"] = outs[0].tolist()
result["conf"] = outs[1].tolist()
result["classid"] = outs[2].tolist()
result['success'] = True
return jsonify(result)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8989, debug=True)
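A quick way to sanity-check the /close endpoint once the service is running (a minimal sketch, assuming the server is on the local machine and a test image exists at testbg/1.png):
import requests
resp = requests.post("http://127.0.0.1:8989/close",
                     files={"image": open("testbg/1.png", "rb")})
print(resp.json())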
b. The model-processing class. This file is myyolov5.py; my2yolov5.py is similar, only the class name differs (a sketch is given after the class below).
import numpy as np
import torch
from models.experimental import attempt_load
from utils.datasets import letterbox
from utils.general import check_img_size, non_max_suppression, scale_coords
from utils.torch_utils import select_device
class myYOLOv5(object):
    # Parameter settings
    _defaults = {
        "weights": "./closezxw.pt",
        "imgsz": 640,
        "iou_thres": 0.45,
        "conf_thres": 0.9,
        # "classes": 0  # detect persons only
    }

    @classmethod
    def get_defaults(cls, n):
        if n in cls._defaults:
            return cls._defaults[n]
        else:
            return "Unrecognized attribute name '" + n + "'"

    # Initialisation: load the model
    def __init__(self, device='0', **kwargs):
        self.__dict__.update(self._defaults)
        self.device = select_device(device)
        self.half = self.device.type != "cpu"  # half precision only on CUDA
        self.model = attempt_load(self.weights, map_location=self.device)  # load FP32 model
        self.imgsz = check_img_size(self.imgsz, s=self.model.stride.max())  # check img_size
        if self.half:
            self.model.half()  # to FP16

    # Inference
    def infer(self, inImg):
        # Resize the image to imgsz (640) with letterbox padding
        img = letterbox(inImg, new_shape=self.imgsz)[0]
        # Normalisation and tensor conversion
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW (3x640x640)
        img = np.ascontiguousarray(img)
        img = torch.from_numpy(img).to(self.device)
        img = img.half() if self.half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        # Inference
        pred = self.model(img, augment=True)[0]
        # NMS
        # pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, classes=self.classes, agnostic=True)
        pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, agnostic=True)
        bbox_xyxy = []
        confs = []
        cls_ids = []
        # Parse the detections
        for i, det in enumerate(pred):  # detections per image
            if det is not None and len(det):
                # Rescale the boxes from the letterboxed size back to the original image size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], inImg.shape).round()
                # Collect the results
                for *xyxy, conf, cls in reversed(det):
                    bbox_xyxy.append(xyxy)
                    confs.append(conf.item())
                    cls_ids.append(int(cls.item()))
        xyxys = torch.Tensor(bbox_xyxy)
        confss = torch.Tensor(confs)
        cls_ids = torch.Tensor(cls_ids)
        return xyxys, confss, cls_ids
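my2yolov5.py is the same code with a different class name. Instead of copying the whole file, one option is a small subclass that only overrides the defaults; this is a sketch, and the weights file name wz.pt is a placeholder for the second model's actual weights:
# my2yolov5.py -- minimal sketch; only the class name and weights differ
from myyolov5 import myYOLOv5

class my2YOLOv5(myYOLOv5):
    # "./wz.pt" is a placeholder; point this at the second model's weights
    _defaults = dict(myYOLOv5._defaults, weights="./wz.pt")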
Client-side call:
import json
import pprint
import requests
import cv2
SERVER_URL = "http://192.168.250.10:8989/wz"
for i in range(1, 5):
    TEST_IMAGE = f"testbg/{i}.png"
    # Send the POST request
    image_data = open(TEST_IMAGE, "rb").read()
    response = requests.post(SERVER_URL, files={"image": image_data})
    response_dict = json.loads(response.text)
    pprint.pprint(response_dict)
    # Skip images where the server reported no detections
    if not response_dict.get("success"):
        continue
    # Read the image
    img = cv2.imread(TEST_IMAGE)
    # Draw the red boxes plus the confidence and class-id labels
    for box, conf, classid in zip(response_dict['box'], response_dict['conf'], response_dict['classid']):
        cv2.rectangle(img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 0, 255), 2)
        cv2.putText(img, f"{conf:.2f}", (int(box[0]), int(box[1]) - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.putText(img, f"{classid:.0f}", (int(box[0]) - 20, int(box[1]) - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (50, 200, 50), 2)
    # Shrink the image for display
    img = cv2.resize(img, (int(img.shape[1] * 0.7), int(img.shape[0] * 0.7)))
    # Show the image
    cv2.imshow(str(i), img)
    cv2.waitKey(0)
cv2.destroyAllWindows()
The returned result:
{'box': [[1549.0, 1108.0, 1785.0, 1150.0],
[1859.0, 1117.0, 2081.0, 1155.0],
[1594.0, 1189.0, 1985.0, 1245.0],
[1192.0, 1194.0, 1590.0, 1240.0]],
'classid': [1.0, 2.0, 4.0, 3.0],
'conf': [0.8642578125, 0.87451171875, 0.8828125, 0.89599609375],
'success': True}
The displayed result:
