Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
matplotlib prints this warning when plt.imshow receives RGB data outside the valid range: float values must lie in [0, 1] and integer values in [0, 255]. The four examples below show exactly when the warning appears and when it does not.
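Before the full examples, a minimal sketch of the three cases the warning message describes, using a made-up random test image (the array names here are only for illustration):

import numpy as np
import matplotlib.pyplot as plt

# hypothetical 8-bit RGB test image, values in 0-255
img_uint8 = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)

plt.imshow(img_uint8)                      # integers in [0, 255]: fine, no warning
plt.show()

plt.imshow(img_uint8 / 255.0)              # floats in [0, 1]: also fine
plt.show()

plt.imshow(img_uint8.astype(np.float32))   # floats in 0-255: triggers the clipping warning
plt.show()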
eg1: float data outside the 0-1 range
import numpy as np
import matplotlib.pyplot as plt
import torch as t
import cv2
# cv2.imread often fails on Windows paths that contain non-ASCII characters, so the raw bytes are
# read with np.fromfile and decoded with cv2.imdecode instead
# image1=cv2.imread(r"..\..\..\数据集和预训练模型\datasets\datasets\train\cat\cat.0.jpg")
image1=cv2.imdecode(np.fromfile(r"..\..\..\数据集和预训练模型\datasets\datasets\train\cat\cat.0.jpg",dtype=np.uint8),-1)
print(image1.shape)
# convert to a float32 tensor; the two permutes go HWC -> CHW -> HWC, leaving the layout unchanged
image1=t.tensor(image1,dtype=t.float32).permute(2,0,1).permute(1,2,0)
print(image1[:,:,2])
image1=np.array(image1)
# OpenCV loads images in BGR order; swap the channels to RGB for matplotlib
b,g,r=cv2.split(image1)
image1=cv2.merge([r,g,b])
print(image1[:,:,0])
# the float32 values still range over 0-255, outside imshow's valid [0, 1] float range, so the warning appears
plt.imshow(image1)
plt.show()
D:\Anaconda\envs\deep_learning\python.exe E:/classification-pytorch-main/classification-pytorch-main/utils/dataloader.py
(374, 500, 3)
tensor([[203., 203., 204., ..., 240., 239., 238.],
[203., 203., 204., ..., 241., 240., 238.],
[203., 203., 204., ..., 241., 240., 239.],
...,
[153., 153., 153., ..., 2., 2., 2.],
[152., 152., 152., ..., 2., 2., 2.],
[151., 151., 151., ..., 1., 1., 1.]])
[[203. 203. 204. ... 240. 239. 238.]
[203. 203. 204. ... 241. 240. 238.]
[203. 203. 204. ... 241. 240. 239.]
...
[153. 153. 153. ... 2. 2. 2.]
[152. 152. 152. ... 2. 2. 2.]
[151. 151. 151. ... 1. 1. 1.]]
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
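Because the out-of-range floats are clipped to [0, 1], nearly every pixel saturates at 1.0 and the displayed figure comes out almost entirely white. Two minimal ways to avoid this, applied to the image1 float array from the code above (eg2 below uses the first one):

plt.imshow(image1/255)                   # rescale the floats into [0, 1]
plt.show()
plt.imshow(image1.astype(np.uint8))      # or cast back to 8-bit integers in [0, 255]
plt.show()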
eg2: float data scaled back into the 0-1 range
import numpy as np
import matplotlib.pyplot as plt
import torch as t
import cv2
# image1=cv2.imread(r"..\..\..\数据集和预训练模型\datasets\datasets\train\cat\cat.0.jpg")
image1=cv2.imdecode(np.fromfile(r"..\..\..\数据集和预训练模型\datasets\datasets\train\cat\cat.0.jpg",dtype=np.uint8),-1)
print(image1.shape)
image1=t.tensor(image1,dtype=t.float32).permute(2,0,1).permute(1,2,0)
print(image1[:,:,2])
image1=np.array(image1)
b,g,r=cv2.split(image1)
# dividing by 255 scales the float values back into [0, 1], so imshow no longer complains
image1=cv2.merge([r,g,b])/255
print(image1[:,:,0])
plt.imshow(image1)
plt.show()
D:\Anaconda\envs\deep_learning\python.exe E:/transformer_1/classification-pytorch-main/classification-pytorch-main/utils/dataloader.py
(374, 500, 3)
tensor([[203., 203., 204., ..., 240., 239., 238.],
[203., 203., 204., ..., 241., 240., 238.],
[203., 203., 204., ..., 241., 240., 239.],
...,
[153., 153., 153., ..., 2., 2., 2.],
[152., 152., 152., ..., 2., 2., 2.],
[151., 151., 151., ..., 1., 1., 1.]])
[[0.79607844 0.79607844 0.8 ... 0.9411765 0.9372549 0.93333334]
[0.79607844 0.79607844 0.8 ... 0.94509804 0.9411765 0.93333334]
[0.79607844 0.79607844 0.8 ... 0.94509804 0.9411765 0.9372549 ]
...
[0.6 0.6 0.6 ... 0.00784314 0.00784314 0.00784314]
[0.59607846 0.59607846 0.59607846 ... 0.00784314 0.00784314 0.00784314]
[0.5921569 0.5921569 0.5921569 ... 0.00392157 0.00392157 0.00392157]]
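The split/merge pair works, but the same BGR-to-RGB swap can be done in one call with cv2.cvtColor; a minimal equivalent sketch, assuming image1 is still the float32 BGR array from just before the split above:

# cvtColor swaps BGR to RGB in one step; dividing by 255 keeps the floats inside [0, 1]
image_rgb=cv2.cvtColor(image1,cv2.COLOR_BGR2RGB)/255
plt.imshow(image_rgb)
plt.show()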
eg3: int data kept in the 0-255 range
import numpy as np
import matplotlib.pyplot as plt
import torch as t
import cv2
# image1=cv2.imread(r"..\..\..\数据集和预训练模型\datasets\datasets\train\cat\cat.0.jpg")
image1=cv2.imdecode(np.fromfile(r"..\..\..\数据集和预训练模型\datasets\datasets\train\cat\cat.0.jpg",dtype=np.uint8),-1)
print(image1.shape)
# this time keep the data as integers (int32); values in 0-255 are within imshow's valid integer range
image1=t.tensor(image1,dtype=t.int32).permute(2,0,1).permute(1,2,0)
print(image1[:,:,2])
image1=np.array(image1)
b,g,r=cv2.split(image1)
image1=cv2.merge([r,g,b])
print(image1[:,:,0])
plt.imshow(image1)
plt.show()
D:\Anaconda\envs\deep_learning\python.exe E:/transformer_1/classification-pytorch-main/classification-pytorch-main/utils/dataloader.py
(374, 500, 3)
tensor([[203, 203, 204, ..., 240, 239, 238],
[203, 203, 204, ..., 241, 240, 238],
[203, 203, 204, ..., 241, 240, 239],
...,
[153, 153, 153, ..., 2, 2, 2],
[152, 152, 152, ..., 2, 2, 2],
[151, 151, 151, ..., 1, 1, 1]], dtype=torch.int32)
[[203 203 204 ... 240 239 238]
[203 203 204 ... 241 240 238]
[203 203 204 ... 241 240 239]
...
[153 153 153 ... 2 2 2]
[152 152 152 ... 2 2 2]
[151 151 151 ... 1 1 1]]
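With integer data the 0-255 values already sit in imshow's valid integer range, so no warning is printed. For display alone the tensor round trip is not needed at all; a minimal sketch that shows the decoded uint8 array directly (same file path as above):

import numpy as np
import matplotlib.pyplot as plt
import cv2

image1=cv2.imdecode(np.fromfile(r"..\..\..\数据集和预训练模型\datasets\datasets\train\cat\cat.0.jpg",dtype=np.uint8),-1)
plt.imshow(image1[:,:,::-1])   # reverse the channel axis: BGR -> RGB; dtype stays uint8, values in 0-255
plt.show()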
eg4: int data converted to float in the 0-1 range
import numpy as np
import matplotlib.pyplot as plt
import torch as t
import cv2
# image1=cv2.imread(r"..\..\..\数据集和预训练模型\datasets\datasets\train\cat\cat.0.jpg")
image1=cv2.imdecode(np.fromfile(r"..\..\..\数据集和预训练模型\datasets\datasets\train\cat\cat.0.jpg",dtype=np.uint8),-1)
print(image1.shape)
image1=t.tensor(image1,dtype=t.int32).permute(2,0,1).permute(1,2,0)
print(image1[:,:,2])
image1=np.array(image1)
b,g,r=cv2.split(image1)
# dividing the integer array by 255 yields floats in [0, 1], which imshow also accepts
image1=cv2.merge([r,g,b])/255
print(image1[:,:,0])
plt.imshow(image1)
plt.show()
D:\Anaconda\envs\deep_learning\python.exe E:/transformer_1/classification-pytorch-main/classification-pytorch-main/utils/dataloader.py
(374, 500, 3)
tensor([[203, 203, 204, ..., 240, 239, 238],
[203, 203, 204, ..., 241, 240, 238],
[203, 203, 204, ..., 241, 240, 239],
...,
[153, 153, 153, ..., 2, 2, 2],
[152, 152, 152, ..., 2, 2, 2],
[151, 151, 151, ..., 1, 1, 1]], dtype=torch.int32)
[[0.79607843 0.79607843 0.8 ... 0.94117647 0.9372549 0.93333333]
[0.79607843 0.79607843 0.8 ... 0.94509804 0.94117647 0.93333333]
[0.79607843 0.79607843 0.8 ... 0.94509804 0.94117647 0.9372549 ]
...
[0.6 0.6 0.6 ... 0.00784314 0.00784314 0.00784314]
[0.59607843 0.59607843 0.59607843 ... 0.00784314 0.00784314 0.00784314]
[0.59215686 0.59215686 0.59215686 ... 0.00392157 0.00392157 0.00392157]]
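To sum up: the clipping warning appears only in eg1, where float data lies outside [0, 1]; floats in [0, 1] (eg2, eg4) and integers in [0, 255] (eg3) display without complaint. As a convenience, a small helper along these lines can normalize an array whose values are on the 0-255 scale before display (the function name is made up for illustration):

import numpy as np

def to_imshow_range(img):
    # uint8 arrays are already valid for imshow; anything else is scaled and clipped to floats in [0, 1]
    if img.dtype == np.uint8:
        return img
    return np.clip(img/255.0, 0.0, 1.0)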