1.transforms.Normalize()函数的计算过程。
# Demonstrate what transforms.Normalize computes: out = (in - mean) / std, per channel.
import torch
import numpy as np
from torchvision import transforms

# 5x5 image with 3 channels; every pixel in row i holds the value i+1 (uint8).
img = np.arange(1, 6, dtype=np.uint8).reshape(5, 1, 1).repeat(5, axis=1).repeat(3, axis=2)
print(img.shape)
# ToTensor converts HWC uint8 to CHW float32 and rescales values into [0, 1].
img = transforms.ToTensor()(img)
print(img.shape)
print(img)
# Normalize subtracts the per-channel mean and divides by the per-channel std
# (the ImageNet statistics here), so e.g. (1/255 - 0.485) / 0.229 == -2.1008.
img = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                           std=[0.229, 0.224, 0.225])(img)
print(img)
(5, 5, 3)
torch.Size([3, 5, 5])
tensor([[[0.0039, 0.0039, 0.0039, 0.0039, 0.0039],
[0.0078, 0.0078, 0.0078, 0.0078, 0.0078],
[0.0118, 0.0118, 0.0118, 0.0118, 0.0118],
[0.0157, 0.0157, 0.0157, 0.0157, 0.0157],
[0.0196, 0.0196, 0.0196, 0.0196, 0.0196]],
[[0.0039, 0.0039, 0.0039, 0.0039, 0.0039],
[0.0078, 0.0078, 0.0078, 0.0078, 0.0078],
[0.0118, 0.0118, 0.0118, 0.0118, 0.0118],
[0.0157, 0.0157, 0.0157, 0.0157, 0.0157],
[0.0196, 0.0196, 0.0196, 0.0196, 0.0196]],
[[0.0039, 0.0039, 0.0039, 0.0039, 0.0039],
[0.0078, 0.0078, 0.0078, 0.0078, 0.0078],
[0.0118, 0.0118, 0.0118, 0.0118, 0.0118],
[0.0157, 0.0157, 0.0157, 0.0157, 0.0157],
[0.0196, 0.0196, 0.0196, 0.0196, 0.0196]]])
tensor([[[-2.1008, -2.1008, -2.1008, -2.1008, -2.1008],
[-2.0837, -2.0837, -2.0837, -2.0837, -2.0837],
[-2.0665, -2.0665, -2.0665, -2.0665, -2.0665],
[-2.0494, -2.0494, -2.0494, -2.0494, -2.0494],
[-2.0323, -2.0323, -2.0323, -2.0323, -2.0323]],
[[-2.0182, -2.0182, -2.0182, -2.0182, -2.0182],
[-2.0007, -2.0007, -2.0007, -2.0007, -2.0007],
[-1.9832, -1.9832, -1.9832, -1.9832, -1.9832],
[-1.9657, -1.9657, -1.9657, -1.9657, -1.9657],
[-1.9482, -1.9482, -1.9482, -1.9482, -1.9482]],
[[-1.7870, -1.7870, -1.7870, -1.7870, -1.7870],
[-1.7696, -1.7696, -1.7696, -1.7696, -1.7696],
[-1.7522, -1.7522, -1.7522, -1.7522, -1.7522],
[-1.7347, -1.7347, -1.7347, -1.7347, -1.7347],
[-1.7173, -1.7173, -1.7173, -1.7173, -1.7173]]])
[Finished in 1.6s]
2.transforms.Normalize(mean=[0.485, 0.456,
0.406],std=[0.229, 0.224, 0.225])参数由来。
# Show how the per-channel mean/std constants passed to Normalize are obtained.
import torch
import numpy as np
from torchvision import transforms
# Reference: https://zhuanlan.zhihu.com/p/414242338

# Same 5x5x3 uint8 image as before: every pixel in row i equals i+1.
data = np.arange(1, 6, dtype=np.uint8).reshape(5, 1, 1).repeat(5, axis=1).repeat(3, axis=2)
print(data.shape)
# HWC uint8 -> CHW float32 scaled into [0, 1].
data = transforms.ToTensor()(data)
print(data.shape)
# Prepend a batch axis so the layout is (N, C, H, W).
data = torch.unsqueeze(data, 0)
print(data.shape)

# Per-channel accumulators for the running mean and std.
channel_mean = torch.zeros(3)
channel_std = torch.zeros(3)
print(channel_mean)

N, C, H, W = data.shape[:4]
# Flatten H and W so each channel becomes one vector of its pixel values.
data = data.view(N, C, -1)
print(data.shape)
print(data)

# mean/std over the flattened pixel axis (dim 2), then summed across the
# batch axis (dim 0) so several batches could be accumulated the same way.
channel_mean += data.mean(2).sum(0)
channel_std += data.std(2).sum(0)

# Divide by the total number of samples seen (just N = 1 here) to get the
# per-channel mean and standard deviation.
nb_samples = 0.
nb_samples += N
channel_mean /= nb_samples
channel_std /= nb_samples
print(channel_mean, channel_std)
(5, 5, 3)
torch.Size([3, 5, 5])
torch.Size([1, 3, 5, 5])
tensor([0., 0., 0.])
torch.Size([1, 3, 25])
tensor([[[0.0039, 0.0039, 0.0039, 0.0039, 0.0039, 0.0078, 0.0078, 0.0078,
0.0078, 0.0078, 0.0118, 0.0118, 0.0118, 0.0118, 0.0118, 0.0157,
0.0157, 0.0157, 0.0157, 0.0157, 0.0196, 0.0196, 0.0196, 0.0196,
0.0196],
[0.0039, 0.0039, 0.0039, 0.0039, 0.0039, 0.0078, 0.0078, 0.0078,
0.0078, 0.0078, 0.0118, 0.0118, 0.0118, 0.0118, 0.0118, 0.0157,
0.0157, 0.0157, 0.0157, 0.0157, 0.0196, 0.0196, 0.0196, 0.0196,
0.0196],
[0.0039, 0.0039, 0.0039, 0.0039, 0.0039, 0.0078, 0.0078, 0.0078,
0.0078, 0.0078, 0.0118, 0.0118, 0.0118, 0.0118, 0.0118, 0.0157,
0.0157, 0.0157, 0.0157, 0.0157, 0.0196, 0.0196, 0.0196, 0.0196,
0.0196]]])
tensor([0.0118, 0.0118, 0.0118]) tensor([0.0057, 0.0057, 0.0057])
[Finished in 1.7s]