- reshape
import torch

input = torch.arange(12)
# tensor([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11])
input = torch.reshape(input, (3, 4))  # reshape the 12 elements into 3 rows x 4 columns
# tensor([[ 0,  1,  2,  3],
#         [ 4,  5,  6,  7],
#         [ 8,  9, 10, 11]])
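torch.reshape can also infer one dimension: pass -1 for it and its size is worked out from the element count. A minimal sketch:
inferred = torch.reshape(torch.arange(12), (3, -1))  # the -1 is inferred as 4
print(inferred.shape)
# torch.Size([3, 4])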
- view
Returns a tensor with the same data as the input tensor but a different shape.
x = torch.randn(2, 2)
# tensor([[ 1.5941, -0.1812],
#         [ 0.5612,  1.5465]])
print(x.size())
# torch.Size([2, 2])
y = x.view(4)  # same data
# tensor([ 1.5941, -0.1812,  0.5612,  1.5465])
print(y.size())  # the size changes
# torch.Size([4])
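view copies no data, so the new tensor shares storage with the original; writing through one is visible through the other. A quick check, reusing x and y from above:
y[0] = 100.0
print(x[0][0])  # x sees the change because y is only a different view of the same storage
# tensor(100.)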
- flatten
input = torch.reshape(torch.arange(6), (2, 3))
# tensor([[0, 1, 2],
#         [3, 4, 5]])
output = torch.flatten(input)
# tensor([0, 1, 2, 3, 4, 5])
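torch.flatten also takes start_dim / end_dim, which is handy when you want to flatten everything except the batch dimension. A small sketch:
batch = torch.zeros((64, 3, 32, 32))
print(torch.flatten(batch, start_dim=1).shape)  # keep dim 0, flatten the rest (3*32*32 = 3072)
# torch.Size([64, 3072])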
- argmax
input = torch.tensor([[1, 3, 5],
                      [4, 8, 0]])
print(input.argmax(1))  # index of the max value in each row
# tensor([2, 1])
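With dim=0 the index is taken per column instead, and with no dim at all the tensor is treated as flattened; reusing the same input:
print(input.argmax(0))  # index of the max value in each column
# tensor([1, 1, 0])
print(input.argmax())   # flat index of the overall maximum (the value 8)
# tensor(4)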
- manual_seed
Sets the random-number seed so that every run produces the same results.
seed = 111
torch.manual_seed(seed)
print(torch.rand(1, 2))  # example draw (assumed 1x2 rand); with the same seed it is identical on every run
# tensor([[0.7156, 0.9140]])
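Reseeding with the same value replays the same sequence, which is an easy way to confirm reproducibility:
torch.manual_seed(seed)
a = torch.rand(1, 2)
torch.manual_seed(seed)
b = torch.rand(1, 2)
print(torch.equal(a, b))
# True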
- Sequential
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        # Sequential chains the layers so forward() needs only a single call
        self.model1 = Sequential(
            Conv2d(3, 32, kernel_size=(5, 5), padding=2),   # 3x32x32 -> 32x32x32
            MaxPool2d(2),                                    # -> 32x16x16
            Conv2d(32, 32, kernel_size=(5, 5), padding=2),   # -> 32x16x16
            MaxPool2d(2),                                    # -> 32x8x8
            Conv2d(32, 64, kernel_size=(5, 5), padding=2),   # -> 64x8x8
            MaxPool2d(2),                                    # -> 64x4x4
            Flatten(),                                       # -> 64*4*4 = 1024
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x
model = CNN()
input = torch.ones((64, 3, 32, 32))
output = model(input)
print(output.shape)
# torch.Size([64, 10])
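For quick experiments, the same stack can also be built as a plain Sequential without subclassing nn.Module; a minimal sketch using the imports above (model2 is just an illustrative name):
model2 = Sequential(
    Conv2d(3, 32, kernel_size=(5, 5), padding=2), MaxPool2d(2),
    Conv2d(32, 32, kernel_size=(5, 5), padding=2), MaxPool2d(2),
    Conv2d(32, 64, kernel_size=(5, 5), padding=2), MaxPool2d(2),
    Flatten(), Linear(1024, 64), Linear(64, 10)
)
print(model2(torch.ones((64, 3, 32, 32))).shape)
# torch.Size([64, 10])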