import time
import random
import cv2 as cv
import cv2  # note: both the "cv" and "cv2" aliases are used throughout this script
import math
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
import datetime
from pega_inference.v1.sample_client import SampleClient
import threading
from threading import Lock
import queue
import json
import chardet
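# Pipeline implemented below:
#   1. test()    - run the "caitu" (crop) model on every .jpg under the test folder;
#                  each detected region is cropped out and its position logged to
#                  label_position.txt, then the second model classifies every crop
#                  and logs its result to predict_position.txt.
#   2. predict() - join the two result files and draw each predicted class/score as a
#                  labelled box back onto the original test image via plot_one_box().
# Inference requests go through SampleClient (pega_inference) to the server address
# read from the JSON config selected by the FIXID file.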
def plot_one_box(filename, x, img, color=None, label=None, line_thickness=2, width = 0, height = 0):
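    """Draw one bounding box (x = (x1, y1, x2, y2) in pixels) and an optional label on
    img, then overwrite the image file at filename with the annotated result.
    height is used to offset the label text; width is currently unused."""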
    try:
        # Plots one bounding box on image img
        tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
        color = color or [random.randint(0, 255) for _ in range(3)]
        c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
        cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
        if label:
            tf = max(tl - 1, 2)  # font thickness
            t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
            c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
            print("plot_one_box label box:", c2, tl, t_size)
            cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled label background
            cv2.putText(img, label, (c1[0] + 2, int(c1[1]) - int(height / 8)), 0, tl,
                        [0, 0, 255], thickness=2, lineType=cv2.LINE_AA)
        cv2.imwrite(filename, img)
        return img
    except Exception as e:  # cv2 raises cv2.error rather than IOError on failure
        print(e)
def sample_client_object(img_list,inference_ip, model_name, model_token):
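    """Send img_list to the inference server and return one dict per detection:
    {"class": category name, "points": [x, y, width, height], "predict": probability list}."""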
global d1, d2, response_predict, lst_bbox
d1 = datetime.datetime.now()
inference_server_ip = inference_ip
inference_server_url = "http://{}".format(inference_server_ip)
print("model:",inference_ip, model_name, model_token)
client = SampleClient(url=inference_server_url, # ip address of inference server
port="7779", # constant port number: 7945
caller_id="BN05",
model_type="1", # model type corresponding to model name
model_name=model_name, # model name
model_token=model_token) # modle token
# show model version
print("nmodel version :\n" + client.get_version_json())
# do prediction, will return ResponsePredict()
response_predict = client.predict_images(img_list)
d2 = datetime.datetime.now()
# will return list of ResponseOutput()
response_outputs = response_predict.outputs
labelart = []
lst_bbox = []
for output in response_outputs:
for result in output.results:
            # result.bbox = [top-left x, top-left y, width, height]
bbox = result.bbox
            bbox = [max(v, 0) for v in bbox]  # clamp any negative coordinates/sizes to 0
category_id = int(result.category_id)
max_prop = result.category_probs
category_name = result.category_name
            predict_result = {
                "class": category_name,
                "points": bbox,
                "predict": max_prop
            }
            labelart.append(predict_result)
return labelart
def drawpoint(filename, predictions):
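    """Crop every predicted bounding box out of the image at filename, save each crop
    into the CambrianProdict folder (numbered by the global count), and append one
    CSV-style line per crop to label_position.txt.  Returns a list of crop-info dicts."""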
global count
info = []
tt1 = r'D:\LABELAOI\Test\CambrianProdict'
path, picname = os.path.split(filename)
if not os.path.exists(tt1):
os.mkdir(tt1)
img = cv.imread(filename)
with open(r"D:\LABELAOI\Test\label_position.txt",'a')as f:
for i in predictions:
vertices = i["points"]
## result.bbox - [x, y, width, height ]
x1 = int(vertices[0])
y1 = int(vertices[1])
x2 = int(vertices[2]) + x1
y2 = int(vertices[3]) + y1
img_roi = img[y1:y2, x1:x2]
save_path1 = os.path.join(tt1, f'{picname.split(".")[0]}_{predictions[0]["class"]}_CAM{count}.jpg')
cv2.imwrite(save_path1, img_roi)
imginfo = {
"picname":filename,
"savepath1":save_path1,
"class":i['class'],
"position":vertices,
"predict":i['predict']
}
            tt = (f"{filename},{save_path1},{i['class']},{i['predict'][0]},"
                  f"{vertices[0]}:{vertices[1]}:{vertices[2]}:{vertices[3]}\n")
f.write(tt)
info.append(imginfo)
count += 1
return info
class yolovthread(threading.Thread):
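    """Worker thread that runs one image through sample_client_object().

    A "caitu" (crop) model has its detections cropped out via drawpoint(); any other
    model has its top prediction appended to predict_position.txt.  Completion is
    signalled by appending the thread name to the shared cut_info / predict_info lists."""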
def __init__(self,testpic, model_name, model_weight,model_version, model_token):
super().__init__()
self.lock = Lock()
self.testpic = testpic
self.model_name = model_name
self.model_weight = model_weight
self.model_version = model_version
self.model_token = model_token
def run(self):
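        """Run inference on self.testpic and dispatch to cropping or result logging."""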
global cut_info,predict_info
print("the thread is start!",threading.current_thread().getName())
img = cv.imread(self.testpic)
        img_list = [img]
result = sample_client_object(img_list, inference_ip, self.model_name, self.model_token)
if "caitu" in self.model_name:
drawpoint(self.testpic, result)
cut_info.append(threading.current_thread().getName())
return
else:
labelinfo = ""
with open("D:\LABELAOI\Test\predict_position.txt", "a")as ff:
labelinfo = self.testpic+","+str(result[0].get('class'))+","+str(round(result[0].get("predict")[0], 2))
ff.write(labelinfo)
ff.write("\n")
predict_info.append(threading.current_thread().getName())
def read_config(filepath):
with open(filepath, "r") as reader:
cfg = json.loads(reader.read())
return cfg
def readDATfile(path):
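    """Read the FIXID file as bytes, detect its encoding with chardet, and return the
    decoded text (used as the top-level key into the JSON config)."""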
tt = "NA"
with open(path, "rb")as f:
fixid = f.read()
f_charInfo = chardet.detect(fixid)
tt = fixid.decode(f_charInfo['encoding'])
return tt
def read_predictfile(path):
labels = []
with open(path, "r")as f:
content = f.readlines()
for i in content:
labels.append(i.strip())
return labels
def get_file_basename(path):
path = os.path.basename(path)
filename = os.path.splitext(path)[0]
return filename
def predict():
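    """Join predict_position.txt (class/score per crop) with label_position.txt
    (crop position per original image) and draw the result onto the original image."""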
# bb, cutinformation = '', []
# testpath = "D:\\LABELAOI\\Test"
predict_dict = r"D:\LABELAOI\Test\predict_position.txt"
cut_dict = r"D:\LABELAOI\Test\label_position.txt"
predict_del_info = read_predictfile(predict_dict)
cut_del_info = read_predictfile(cut_dict)
# print(predict_del_info)
for predict_message in predict_del_info:
premessage = predict_message.split(",")
predpicname = get_file_basename(premessage[0])
for i in cut_del_info:
cutpic = i.split(",")
cutpicname = get_file_basename(cutpic[1])
if cutpicname == predpicname:
img1 = cv.imread(cutpic[0])
                # last field is "x:y:width:height" of the crop inside the original image
                x1, y1, width, height = [int(v) for v in cutpic[-1].split(":")]
                x2 = width + x1
                y2 = height + y1
                label = str(premessage[1]) + " " + str(premessage[2])
plotimg = plot_one_box(cutpic[0], (x1, y1, x2, y2), img1, label=label,
color=[0, 0, 255], line_thickness=2, width=width,
height=height)
break
def test():
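    """Run the two-stage flow described by the config: the "caitu" model crops regions
    from every .jpg in testpath, then the second model classifies each crop.  Simple
    polling on cut_info / predict_info waits for the worker threads to finish."""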
model = 'yolov'
# t1 = time.time()
threading_cut = 0
threading_predict = 0
for model_name in config[fixid][model]:
print("this modelname;",model_name)
model_weight = config[fixid][model][model_name]["weight"]
model_version = config[fixid][model][model_name]["model_version"]
model_token = config[fixid][model][model_name]["model_token"]
# campics = config[fixid][model][model_name]["campics"]
testpath = "D:\\LABELAOI\\Test"
if "caitu" in model_name:
for pic in os.listdir(testpath):
if pic.endswith("jpg"):
picname = os.path.join(testpath, pic)
q.put(picname)
threading_cut = q.qsize()
for i in range(q.qsize()):
testpic = q.get()
t = yolovthread(testpic,model_name, model_weight,model_version, model_token)
t.start()
elif "LABEI" in model_name:
try:
                #################### inference on the cropped images ####################
while True:
if threading_cut == len(cut_info):
cutpath = r"D:\LABELAOI\Test\CambrianProdict"
predictpath = os.listdir(cutpath)
                        predict_pics = [i for i in predictpath if i.endswith(".jpg")]  # count only the .jpg crops
# print("len for cut img:",predict_pics)
threading_predict = len(predict_pics)
for i in predictpath:
if i.endswith(".jpg" ):
cutimg = os.path.join(cutpath, i)
tp = yolovthread(cutimg, model_name, model_weight,model_version, model_token)
tp.start()
break
else:
time.sleep(1)
                #################### inference on the cropped images ####################
except Exception as e:
print(e)
else:
while True:
if threading_predict == len(predict_info):
#################print("biao ji tu pian") ####################3
break
else:
time.sleep(1)
if __name__ == '__main__':
flag = False
count = 0
q = queue.Queue()
cut_info, predict_info = [], []
    condition = threading.Condition()  # currently unused
configpath = "D:\LABELAOI\Config\BOXAOI_baslor.json"
fixidpath = 'D:\LABELAOI\Config\FIXID.INI'
config = read_config(configpath)
fixid = readDATfile(fixidpath)
inference_ip = config[fixid]['inference_ip']
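    # A minimal sketch of the JSON layout this script assumes for BOXAOI_baslor.json,
    # inferred from the config[...] look-ups above; the key and value strings below are
    # illustrative placeholders, not the real configuration:
    #
    # {
    #     "<fixid>": {
    #         "inference_ip": "192.168.0.10",
    #         "yolov": {
    #             "caitu_model": {"weight": "...", "model_version": "...", "model_token": "..."},
    #             "LABEI_model": {"weight": "...", "model_version": "...", "model_token": "..."}
    #         }
    #     }
    # }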
test()
predict()