Object Detection and Semantic Segmentation with torchvision

This article walks through how to implement object detection (instance segmentation on the COCO categories) and semantic segmentation in Python with the torchvision library, demonstrating the pretrained Mask R-CNN and DeepLabV3 models respectively.


1. Introduction

This post uses torchvision's pretrained models to implement both tasks: Mask R-CNN (maskrcnn_resnet50_fpn) for object detection and instance segmentation, and DeepLabV3 (deeplabv3_resnet101) for semantic segmentation.
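Both pipelines rely on torchvision's standard calling conventions: detection models take a list of 3×H×W float tensors and return one dict per image, while segmentation models take a batched N×C×H×W tensor and return a dict of logit maps. Below is a minimal sketch of the two pretrained constructors used in this post (note that torchvision >= 0.13 prefers a weights= argument over the deprecated pretrained=True flag, which still works in older releases):

import torch
import torchvision

# Mask R-CNN with a ResNet-50 FPN backbone, pretrained on COCO
det_model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True).eval()

# DeepLabV3 with a ResNet-101 backbone, 21 output classes (VOC label set)
seg_model = torchvision.models.segmentation.deeplabv3_resnet101(pretrained=True).eval()

with torch.no_grad():
    dets = det_model([torch.rand(3, 300, 400)])   # list of dicts: boxes, labels, scores, masks
    seg = seg_model(torch.rand(1, 3, 224, 224))   # dict with 'out' (and 'aux') logit maps

print(dets[0]['boxes'].shape)  # torch.Size([N, 4])
print(seg['out'].shape)        # torch.Size([1, 21, 224, 224])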

2. Code

2.1 Object Detection

from PIL import Image
import matplotlib.pyplot as plt
import torch
import torchvision.transforms as T
import torchvision
import numpy as np
import cv2
import random


# COCO's original 91 category IDs include unused slots (the 'N/A' entries), so a
# model label ID can be used directly as an index into this list
COCO_INSTANCE_CATEGORY_NAMES = [
    '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
    'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
    'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
    'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
    'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
    'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
    'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
    'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
    'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
    'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]


def get_prediction(img_path, threshold):
    # Load a pretrained Mask R-CNN model for object detection / instance segmentation
    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
    model.eval()
    img = Image.open(img_path).convert('RGB')
    transform = T.Compose([T.ToTensor()])
    img = transform(img)
    with torch.no_grad():
        pred = model([img])
    pred_score = list(pred[0]['scores'].detach().numpy())
    print(pred[0].keys())  # dict_keys(['boxes', 'labels', 'scores', 'masks'])
    # Scores come back sorted in descending order, so the index of the last score
    # above the threshold marks the cut-off for detections to keep
    pred_t = [pred_score.index(x) for x in pred_score if x > threshold][-1]
    # Binarize the soft masks and squeeze the channel dim -> shape (N, H, W)
    pred_masks = (pred[0]['masks'] > 0.5).squeeze(1).detach().cpu().numpy()
    pred_boxes = [[(int(i[0]), int(i[1])), (int(i[2]), int(i[3]))] for i in list(pred[0]['boxes'].detach().numpy())]
    pred_class = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].numpy())]
    pred_masks = pred_masks[:pred_t + 1]
    pred_boxes = pred_boxes[:pred_t + 1]
    pred_class = pred_class[:pred_t + 1]
    return pred_masks, pred_boxes, pred_class


def random_colour_masks(image):
    # Paint a binary mask in a randomly chosen colour, returned as an RGB image
    colours = [[0, 255, 0], [0, 0, 255], [255, 0, 0], [0, 255, 255], [255, 255, 0], [255, 0, 255], [80, 70, 180],
               [250, 80, 190], [245, 145, 50], [70, 150, 250], [50, 190, 190]]
    r = np.zeros_like(image).astype(np.uint8)
    g = np.zeros_like(image).astype(np.uint8)
    b = np.zeros_like(image).astype(np.uint8)
    r[image == 1], g[image == 1], b[image == 1] = colours[random.randrange(len(colours))]
    coloured_mask = np.stack([r, g, b], axis=2)
    return coloured_mask


def instance_segmentation_api(img_path, threshold=0.5, rect_th=3, text_size=2, text_th=2):
    masks, boxes, cls = get_prediction(img_path, threshold)
    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    for i in range(len(masks)):
        # Blend each instance mask into the image, then draw its box and class label
        rgb_mask = random_colour_masks(masks[i])
        randcol = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
        img = cv2.addWeighted(img, 1, rgb_mask, 0.5, 0)
        cv2.rectangle(img, boxes[i][0], boxes[i][1], color=randcol, thickness=rect_th)
        cv2.putText(img, cls[i], boxes[i][0], cv2.FONT_HERSHEY_SIMPLEX, text_size, randcol, thickness=text_th)
    plt.figure(figsize=(20, 30))
    plt.imshow(img)
    plt.xticks([])
    plt.yticks([])
    plt.show()
    # Convert back to BGR channel order before writing with OpenCV
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    cv2.imwrite('result_det.jpg', img)


if __name__ == '__main__':
    instance_segmentation_api('horse.jpg')
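The script above runs on CPU; on a machine with a CUDA GPU, the model and input tensor can be moved to the device for a large speed-up. Here is a minimal sketch of the same forward pass on GPU (it falls back to CPU when CUDA is unavailable):

import torch
import torchvision
import torchvision.transforms as T
from PIL import Image

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True).to(device).eval()

img = T.ToTensor()(Image.open('horse.jpg').convert('RGB')).to(device)
with torch.no_grad():
    pred = model([img])

# Move results back to the CPU before converting to numpy
masks = (pred[0]['masks'] > 0.5).squeeze(1).cpu().numpy()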


2.2 Semantic Segmentation

import torch
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
from torchvision import models
from torchvision import transforms


def pre_img(img):
    # Drop the alpha channel if the image is RGBA (equivalent to img.convert('RGB'))
    if img.mode == 'RGBA':
        a = np.asarray(img)[:, :, :3]
        img = Image.fromarray(a)
    return img


def decode_seg_map(image, nc=21):
    # Standard PASCAL VOC 21-class colour palette, indexed by class ID
    label_colors = np.array([(0, 0, 0), (128, 0, 0), (0, 128, 0), (128, 128, 0),
                             (0, 0, 128), (128, 0, 128), (0, 128, 128), (128, 128, 128),
                             (64, 0, 0), (192, 0, 0), (64, 128, 0), (192, 128, 0),
                             (64, 0, 128), (192, 0, 128), (64, 128, 128), (192, 128, 128),
                             (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0), (0, 64, 128)])
    r = np.zeros_like(image).astype(np.uint8)
    g = np.zeros_like(image).astype(np.uint8)
    b = np.zeros_like(image).astype(np.uint8)

    for l in range(0, nc):
        idx = image == l
        r[idx] = label_colors[l, 0]
        g[idx] = label_colors[l, 1]
        b[idx] = label_colors[l, 2]

    return np.stack([r, g, b], axis=2)


if __name__ == '__main__':
    # Load a pretrained DeepLabV3 model for semantic segmentation
    model = models.segmentation.deeplabv3_resnet101(pretrained=True)
    model = model.eval()

    img = Image.open('horse.jpg')
    print(img.size)  # (694, 922)
    plt.imshow(img)
    plt.axis('off')
    plt.show()

    im = pre_img(img)
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    input_img = transform(im).unsqueeze(0)  # add a batch dimension: (1, 3, 224, 224)
    tt = np.transpose(input_img.detach().numpy()[0], (1, 2, 0))  # CHW -> HWC for plotting
    print(tt.shape)  # (224, 224, 3)
    plt.imshow(tt)  # colours look off here because the tensor is normalized
    plt.axis('off')
    plt.show()

    with torch.no_grad():
        output = model(input_img)
    print(output.keys())  # odict_keys(['out', 'aux'])
    print(output['out'].shape)  # torch.Size([1, 21, 224, 224])
    # Per-pixel argmax over the 21 class channels -> (224, 224) map of class IDs
    output = torch.argmax(output['out'].squeeze(), dim=0).detach().cpu().numpy()
    result_class = set(list(output.flat))
    print(result_class)  # {0, 13, 15}

    rgb = decode_seg_map(output)
    print(rgb.shape)  # (224, 224, 3)
    img = Image.fromarray(rgb)
    img.save('result_seg.jpg')
    plt.axis('off')
    plt.imshow(img)
    plt.show()
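The palette in decode_seg_map follows the PASCAL VOC class order, so the printed class indices map directly to names; for the sample output {0, 13, 15} that gives background, horse, and person. A small helper to make the printout readable (VOC_CLASSES is the standard VOC 2012 label list written out here, not something torchvision exports):

VOC_CLASSES = [
    'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
    'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
    'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
]

# Map the predicted class indices back to readable names
print([VOC_CLASSES[c] for c in sorted(result_class)])  # ['background', 'horse', 'person']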


3. References

Pytorch预训练模型、内置模型实现图像分类、检测和分割 (PyTorch pretrained and built-in models for image classification, detection, and segmentation)
