2. Preliminaries

2.1 Data Manipulation

import torch
x = torch.arange(12)
x
tensor([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11])
x.shape
torch.Size([12])
x.numel()
12
X=x.reshape(3,4)
X
tensor([[ 0,  1,  2,  3],
        [ 4,  5,  6,  7],
        [ 8,  9, 10, 11]])
X.shape
torch.Size([3, 4])
Y=x.reshape(-1,4)
Y
tensor([[ 0,  1,  2,  3],
        [ 4,  5,  6,  7],
        [ 8,  9, 10, 11]])
Z=x.reshape(3,-1)
Z
tensor([[ 0,  1,  2,  3],
        [ 4,  5,  6,  7],
        [ 8,  9, 10, 11]])
torch.zeros((2,3,4))
tensor([[[0., 0., 0., 0.],
         [0., 0., 0., 0.],
         [0., 0., 0., 0.]],

        [[0., 0., 0., 0.],
         [0., 0., 0., 0.],
         [0., 0., 0., 0.]]])
torch.ones((2,3,4))
tensor([[[1., 1., 1., 1.],
         [1., 1., 1., 1.],
         [1., 1., 1., 1.]],

        [[1., 1., 1., 1.],
         [1., 1., 1., 1.],
         [1., 1., 1., 1.]]])
torch.randn(3,4)
tensor([[ 1.6438, -1.2879,  0.2324,  0.2719],
        [-0.6636,  0.9939, -0.8435, -1.0906],
        [-0.5617,  0.2107, -0.9530,  0.7362]])
x=torch.tensor([1.0,2,4,8])
y=torch.tensor([2,2,2,2])
x+y,x-y,x*y,x/y,x**y
(tensor([ 3.,  4.,  6., 10.]),
 tensor([-1.,  0.,  2.,  6.]),
 tensor([ 2.,  4.,  8., 16.]),
 tensor([0.5000, 1.0000, 2.0000, 4.0000]),
 tensor([ 1.,  4., 16., 64.]))
torch.exp(x)
tensor([2.7183e+00, 7.3891e+00, 5.4598e+01, 2.9810e+03])
X=torch.arange(12,dtype=torch.float32).reshape((3,4))
X
tensor([[ 0.,  1.,  2.,  3.],
        [ 4.,  5.,  6.,  7.],
        [ 8.,  9., 10., 11.]])
Y=torch.tensor([[2.0,1,4,3],[1,2,3,4],[4,3,2,1]])
Y
tensor([[2., 1., 4., 3.],
        [1., 2., 3., 4.],
        [4., 3., 2., 1.]])
torch.cat((X,Y),dim=0)
tensor([[ 0.,  1.,  2.,  3.],
        [ 4.,  5.,  6.,  7.],
        [ 8.,  9., 10., 11.],
        [ 2.,  1.,  4.,  3.],
        [ 1.,  2.,  3.,  4.],
        [ 4.,  3.,  2.,  1.]])
torch.cat((X,Y),dim=1)
tensor([[ 0.,  1.,  2.,  3.,  2.,  1.,  4.,  3.],
        [ 4.,  5.,  6.,  7.,  1.,  2.,  3.,  4.],
        [ 8.,  9., 10., 11.,  4.,  3.,  2.,  1.]])
X==Y,X<Y
(tensor([[False,  True, False,  True],
         [False, False, False, False],
         [False, False, False, False]]),
 tensor([[ True, False,  True, False],
         [False, False, False, False],
         [False, False, False, False]]))
X.sum()
tensor(66.)
a=torch.arange(6).reshape(3,2,1)
b=torch.arange(2).reshape(1,2)
a,b
(tensor([[[0],
          [1]],
 
         [[2],
          [3]],
 
         [[4],
          [5]]]),
 tensor([[0, 1]]))
c=a+b
c
tensor([[[0, 1],
         [1, 2]],

        [[2, 3],
         [3, 4]],

        [[4, 5],
         [5, 6]]])
c[0]
tensor([[0, 1],
        [1, 2]])
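The result shape follows the general broadcasting rule: shapes are aligned from the trailing dimension, and every size-1 axis is stretched to match the other operand, so (3,2,1) and (1,2) combine to (3,2,2). In recent PyTorch versions this can be checked directly (a minimal sketch using the a and b above):

torch.broadcast_shapes(a.shape,b.shape) #torch.Size([3, 2, 2])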
X[-1],X[1:3]
(tensor([ 8.,  9., 10., 11.]),
 tensor([[ 4.,  5.,  6.,  7.],
         [ 8.,  9., 10., 11.]]))
X[1,2]=9
X
tensor([[ 0.,  1.,  2.,  3.],
        [ 4.,  5.,  9.,  7.],
        [ 8.,  9., 10., 11.]])
X[0:2,:]=12
X
tensor([[12., 12., 12., 12.],
        [12., 12., 12., 12.],
        [ 8.,  9., 10., 11.]])
before=id(Y)
Y=Y+X
id(Y)==before
False
Z=torch.zeros_like(Y)
Z
tensor([[0., 0., 0., 0.],
        [0., 0., 0., 0.],
        [0., 0., 0., 0.]])
print('id(Z):',id(Z))
id(Z): 3055861362752
Z[:]=X+Y
print('id(Z):',id(Z))
id(Z): 3055861362752
before=id(X)
X+=Y
id(X)==before
True
A=X.numpy()
B=torch.tensor(A)
type(A),type(B)
(numpy.ndarray, torch.Tensor)
A,B
(array([[26., 25., 28., 27.],
        [25., 26., 27., 28.],
        [20., 21., 22., 23.]], dtype=float32),
 tensor([[26., 25., 28., 27.],
         [25., 26., 27., 28.],
         [20., 21., 22., 23.]]))
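One detail worth knowing here: X.numpy() returns an array that shares memory with the tensor, while torch.tensor(A) makes an independent copy. A quick check (a sketch; the exact values depend on the cells above):

A[0,0]=100 #mutate the NumPy array in place
X[0,0],B[0,0] #X reflects the change (shared memory); B does not (independent copy)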
a=torch.tensor([3.5])
a,a.item(),float(a),int(a)
(tensor([3.5000]), 3.5, 3.5, 3)
x = torch.arange(12)
X=x.reshape(3,2,2)
X
tensor([[[ 0,  1],
         [ 2,  3]],

        [[ 4,  5],
         [ 6,  7]],

        [[ 8,  9],
         [10, 11]]])

2.2 Data Preprocessing

import os

os.makedirs(os.path.join('..', 'data'), exist_ok=True)
data_file = os.path.join('..', 'data', 'house_tiny.csv')
with open(data_file, 'w') as f:
    f.write('NumRooms,Alley,Price\n') # column names
    f.write('NA,Pave,127500\n') # each row is a data example
    f.write('2,NA,106000\n')
    f.write('4,NA,178100\n')
    f.write('NA,NA,140000\n')
import pandas as pd

data = pd.read_csv(data_file)
print(data)
   NumRooms Alley   Price
0       NaN  Pave  127500
1       2.0   NaN  106000
2       4.0   NaN  178100
3       NaN   NaN  140000
inputs, outputs = data.iloc[:, 0:2], data.iloc[:, 2]
inputs = inputs.fillna(inputs.mean(numeric_only=True)) # numeric_only=True skips the string column; required on newer pandas
print(inputs)
   NumRooms Alley
0       3.0  Pave
1       2.0   NaN
2       4.0   NaN
3       3.0   NaN
inputs = pd.get_dummies(inputs, dummy_na=True) # dummy_na=True adds the Alley_nan column shown below
#inputs = pd.get_dummies(inputs) # without dummy_na, NaN rows would simply get all-zero dummies
print(inputs)
   NumRooms  Alley_Pave  Alley_nan
0       3.0           1          0
1       2.0           0          1
2       4.0           0          1
3       3.0           0          1
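Note that pandas 2.0 changed get_dummies to produce boolean columns by default; to reproduce the 0/1 integers shown above on a newer pandas (an assumption about your installed version), the dtype parameter can be passed:

inputs = pd.get_dummies(inputs, dummy_na=True, dtype=int)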
import torch

X = torch.tensor(inputs.to_numpy(dtype=float))
y = torch.tensor(outputs.to_numpy(dtype=float))
#X,y = torch.tensor(inputs.values),torch.tensor(outputs.values)
X, y
(tensor([[3., 1., 0.],
         [2., 0., 1.],
         [4., 0., 1.],
         [3., 0., 1.]], dtype=torch.float64),
 tensor([127500., 106000., 178100., 140000.], dtype=torch.float64))
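The float64 dtype above is inherited from NumPy's default. Deep learning code more commonly works in float32; one way to get that here (a minor variation on the cell above, not the book's canonical form) is:

X = torch.tensor(inputs.to_numpy(dtype='float32'))
y = torch.tensor(outputs.to_numpy(dtype='float32'))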

2.3 Linear Algebra

import torch

x = torch.tensor(3.0)
y = torch.tensor(2.0)

x + y, x * y, x / y, x**y
(tensor(5.), tensor(6.), tensor(1.5000), tensor(9.))
x=torch.arange(4)
x
tensor([0, 1, 2, 3])
x[-1]
tensor(3)
len(x)
4
x.shape
torch.Size([4])
A=torch.arange(20).reshape(5,4)
A
tensor([[ 0,  1,  2,  3],
        [ 4,  5,  6,  7],
        [ 8,  9, 10, 11],
        [12, 13, 14, 15],
        [16, 17, 18, 19]])
A.T
tensor([[ 0,  4,  8, 12, 16],
        [ 1,  5,  9, 13, 17],
        [ 2,  6, 10, 14, 18],
        [ 3,  7, 11, 15, 19]])
B=torch.tensor([[1,2,3],[2,0,4],[3,4,5]])
B
tensor([[1, 2, 3],
        [2, 0, 4],
        [3, 4, 5]])
B==B.T
tensor([[True, True, True],
        [True, True, True],
        [True, True, True]])
X=torch.arange(24).reshape(2,3,4)
X
tensor([[[ 0,  1,  2,  3],
         [ 4,  5,  6,  7],
         [ 8,  9, 10, 11]],

        [[12, 13, 14, 15],
         [16, 17, 18, 19],
         [20, 21, 22, 23]]])
A=torch.arange(20,dtype=torch.float32).reshape(5,4)
B=A.clone()
A,A+B
(tensor([[ 0.,  1.,  2.,  3.],
         [ 4.,  5.,  6.,  7.],
         [ 8.,  9., 10., 11.],
         [12., 13., 14., 15.],
         [16., 17., 18., 19.]]),
 tensor([[ 0.,  2.,  4.,  6.],
         [ 8., 10., 12., 14.],
         [16., 18., 20., 22.],
         [24., 26., 28., 30.],
         [32., 34., 36., 38.]]))
A*B
tensor([[  0.,   1.,   4.,   9.],
        [ 16.,  25.,  36.,  49.],
        [ 64.,  81., 100., 121.],
        [144., 169., 196., 225.],
        [256., 289., 324., 361.]])
a=2
X=torch.arange(24).reshape(2,3,4)
a+X,(a*X).shape
(tensor([[[ 2,  3,  4,  5],
          [ 6,  7,  8,  9],
          [10, 11, 12, 13]],
 
         [[14, 15, 16, 17],
          [18, 19, 20, 21],
          [22, 23, 24, 25]]]),
 torch.Size([2, 3, 4]))
x=torch.arange(4,dtype=torch.float32)
x,x.sum()
(tensor([0., 1., 2., 3.]), tensor(6.))
A.shape,A.sum()
(torch.Size([5, 4]), tensor(190.))
A_sum_axis0=A.sum(axis=0)
A_sum_axis0,A_sum_axis0.shape
(tensor([40., 45., 50., 55.]), torch.Size([4]))
A_sum_axis1=A.sum(axis=1)
A_sum_axis1,A_sum_axis1.shape
(tensor([ 6., 22., 38., 54., 70.]), torch.Size([5]))
A.sum(axis=[0,1])
tensor(190.)
A.mean(),A.sum(),A.numel(),A.sum()/A.numel()
(tensor(9.5000), tensor(190.), 20, tensor(9.5000))
A.mean(axis=0),A.sum(axis=0)/A.shape[0]
(tensor([ 8.,  9., 10., 11.]), tensor([ 8.,  9., 10., 11.]))
sum_A=A.sum(axis=1,keepdims=True)
sum_A
tensor([[ 6.],
        [22.],
        [38.],
        [54.],
        [70.]])
A / sum_A
tensor([[0.0000, 0.1667, 0.3333, 0.5000],
        [0.1818, 0.2273, 0.2727, 0.3182],
        [0.2105, 0.2368, 0.2632, 0.2895],
        [0.2222, 0.2407, 0.2593, 0.2778],
        [0.2286, 0.2429, 0.2571, 0.2714]])
A.cumsum(axis=0)
tensor([[ 0.,  1.,  2.,  3.],
        [ 4.,  6.,  8., 10.],
        [12., 15., 18., 21.],
        [24., 28., 32., 36.],
        [40., 45., 50., 55.]])
y=torch.ones(4,dtype=torch.float32)
x,y,torch.dot(x,y)
(tensor([0., 1., 2., 3.]), tensor([1., 1., 1., 1.]), tensor(6.))
torch.sum(x*y)
tensor(6.)
A.shape,x.shape,torch.mv(A,x)
(torch.Size([5, 4]), torch.Size([4]), tensor([ 14.,  38.,  62.,  86., 110.]))
B=torch.ones(4,3)
torch.mm(A,B)
tensor([[ 6.,  6.,  6.],
        [22., 22., 22.],
        [38., 38., 38.],
        [54., 54., 54.],
        [70., 70., 70.]])
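Both products can also be written with Python's @ operator, which dispatches to torch.matmul:

A@x,A@B #equivalent to torch.mv(A,x) and torch.mm(A,B) for these shapes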
u=torch.tensor([3.0,-4.0])
torch.norm(u)
tensor(5.)
torch.abs(u).sum()
tensor(7.)
torch.norm(torch.ones((4,9)))
tensor(6.)
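This follows directly from the definition: the Frobenius norm is the square root of the sum of squared entries, and a 4x9 tensor of ones has 36 entries, so the norm is sqrt(36)=6.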
x=torch.arange(24,dtype=torch.float32).reshape((2,3,4))
x,len(x)
(tensor([[[ 0.,  1.,  2.,  3.],
          [ 4.,  5.,  6.,  7.],
          [ 8.,  9., 10., 11.]],
 
         [[12., 13., 14., 15.],
          [16., 17., 18., 19.],
          [20., 21., 22., 23.]]]),
 2)
x.sum(axis=0),x.sum(axis=1),x.sum(axis=2)
(tensor([[12., 14., 16., 18.],
         [20., 22., 24., 26.],
         [28., 30., 32., 34.]]),
 tensor([[12., 15., 18., 21.],
         [48., 51., 54., 57.]]),
 tensor([[ 6., 22., 38.],
         [54., 70., 86.]]))
y=torch.arange(120,dtype=torch.float32).reshape(2,3,4,5)
y
tensor([[[[  0.,   1.,   2.,   3.,   4.],
          [  5.,   6.,   7.,   8.,   9.],
          [ 10.,  11.,  12.,  13.,  14.],
          [ 15.,  16.,  17.,  18.,  19.]],

         [[ 20.,  21.,  22.,  23.,  24.],
          [ 25.,  26.,  27.,  28.,  29.],
          [ 30.,  31.,  32.,  33.,  34.],
          [ 35.,  36.,  37.,  38.,  39.]],

         [[ 40.,  41.,  42.,  43.,  44.],
          [ 45.,  46.,  47.,  48.,  49.],
          [ 50.,  51.,  52.,  53.,  54.],
          [ 55.,  56.,  57.,  58.,  59.]]],


        [[[ 60.,  61.,  62.,  63.,  64.],
          [ 65.,  66.,  67.,  68.,  69.],
          [ 70.,  71.,  72.,  73.,  74.],
          [ 75.,  76.,  77.,  78.,  79.]],

         [[ 80.,  81.,  82.,  83.,  84.],
          [ 85.,  86.,  87.,  88.,  89.],
          [ 90.,  91.,  92.,  93.,  94.],
          [ 95.,  96.,  97.,  98.,  99.]],

         [[100., 101., 102., 103., 104.],
          [105., 106., 107., 108., 109.],
          [110., 111., 112., 113., 114.],
          [115., 116., 117., 118., 119.]]]])
z=torch.linalg.norm(y)#torch.linalg.norm computes a generalized L2 norm over all axes of a tensor; the elements must be floating-point or complex
#simplified signature: z=torch.linalg.norm(input,ord,dim)
#input: the input tensor. Its dtype must be floating-point or complex; for complex input the norm uses each element's absolute value. Unlike Tensor.norm, an integer input raises an error.
#ord: the order of the norm. The default is the 2-norm ("fro", the Frobenius norm, for matrices); for a positive integer p it computes the p-norm, given by sum(abs(x)**p)**(1./p).
#dim: compute the norm over the given dimension(s) of the input (e.g. dim=1 computes norms along the second dimension). If dim is not given, the norm is computed over all dimensions; a dim outside the tensor's shape raises an error.
z
tensor(754.2015)
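Passing a pair of dimensions computes a matrix norm over those two axes for every remaining index; for example (a minimal sketch, assuming y as above):

torch.linalg.norm(y,dim=(2,3)) #one Frobenius norm per 4x5 slice, so the result has shape (2,3)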

2.4 Calculus

import numpy as np
from matplotlib_inline import backend_inline
from d2l import torch as d2l

def f(x):
    return 3*x**2-4*x

def numerical_lim(f,x,h):
    return (f(x+h)-f(x))/h

h=0.1
for i in range(5):
    print(f'h={h:.5f},numerical limit={numerical_lim(f,1,h):.5f}')
    h*=0.1
h=0.10000,numerical limit=2.30000
h=0.01000,numerical limit=2.03000
h=0.00100,numerical limit=2.00300
h=0.00010,numerical limit=2.00030
h=0.00001,numerical limit=2.00003
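As a sanity check, the analytic derivative of f(x)=3x**2-4x is f'(x)=6x-4, so f'(1)=2, exactly the value the numerical limit approaches as h shrinks.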
#Three functions for configuring figures
def use_svg_display(): #@save
    ##the @save marker stores the function/class/statement in the d2l package, so it can later be called without redefining it, e.g. d2l.use_svg_display()
    #Use the svg format to display plots in Jupyter
    backend_inline.set_matplotlib_formats('svg')

from matplotlib import pyplot as plt
def set_figsize(figsize=(3.5,2.5)): #@save
    #Set the figure size for matplotlib
    use_svg_display()
    d2l.plt.rcParams['figure.figsize']=figsize

#@save
def set_axes(axes,xlabel,ylabel,xlim,ylim,xscale,yscale,legend):
    #Set the axes for matplotlib
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    axes.set_xscale(xscale)
    axes.set_yscale(yscale)
    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    if legend:
        axes.legend(legend)
    axes.grid()

#plot function: can draw multiple curves
#@save
def plot(X,Y=None,xlabel=None,ylabel=None,legend=None,xlim=None,ylim=None,xscale='linear',yscale='linear',fmts=('-','m--','g-.','r:'),figsize=(3.5,2.5),axes=None):
    #Plot data points
    if legend is None:
        legend=[]

    set_figsize(figsize)
    axes=axes if axes else d2l.plt.gca()

    #Return True if X has one axis
    def has_one_axis(X):
        return(hasattr(X,"ndim") and X.ndim==1 or isinstance(X,list) and not hasattr(X[0],"__len__"))

    if has_one_axis(X):
        X=[X]
    if Y is None:
        X,Y=[[]]*len(X),X
    elif has_one_axis(Y):
        Y=[Y]
    if len(X)!=len(Y):
        X=X*len(Y)
    axes.cla()
    for x,y,fmt in zip(X,Y,fmts):
        if len(x):
            axes.plot(x,y,fmt)
        else:
            axes.plot(y,fmt)
    set_axes(axes,xlabel,ylabel,xlim,ylim,xscale,yscale,legend)
x=np.arange(0,3,0.1)
plot(x,[f(x),2*x-3],'x','f(x)',legend=['f(x)','Tangent line(x=1)'])


[Figure: f(x)=3x**2-4x and its tangent line at x=1]

def g(x):
    return x**3-(1/x)
plot(x,[g(x),4*x-4],'x','g(x)',legend=['g(x)','Tangent line(x=1)'])
F:\user\Temp\ipykernel_25528\1423519574.py:2: RuntimeWarning: divide by zero encountered in true_divide
  return x**3-(1/x)

[Figure: g(x)=x**3-1/x and its tangent line at x=1]
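As a check on the tangent line: g'(x)=3x**2+1/x**2, so g'(1)=4 and g(1)=0, giving the line y=4x-4 plotted above. The divide-by-zero warning comes from evaluating 1/x at x=0, the first point of the range.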

2.5 Automatic Differentiation

import torch

x=torch.arange(4.0)
x
tensor([0., 1., 2., 3.])
y=2*torch.dot(x,x)
y
tensor(28.)
x.requires_grad_(True) #equivalent to x=torch.arange(4.0,requires_grad=True)
x.grad #defaults to None
y=2*torch.dot(x,x)
y
tensor(28., grad_fn=<MulBackward0>)
y.backward()#call the backpropagation function to automatically compute the gradient of y with respect to each component of x
x.grad
tensor([ 0.,  4.,  8., 12.])
x.grad==4*x
tensor([True, True, True, True])
x.grad.zero_()#by default, PyTorch accumulates gradients, so we need to clear the previous values

y=x.sum()
y.backward()
x.grad
tensor([1., 1., 1., 1.])
#For a non-scalar variable: rather than computing the full differentiation matrix, compute the sum of the partial derivatives for each example in the batch separately
#Calling backward on a [non-scalar] requires passing a gradient argument, which specifies the gradient of the differentiated function with respect to self
x.grad.zero_()
y=x*x
y.sum().backward()#equivalent to y.backward(torch.ones(len(x))) -- passing a gradient of ones is appropriate when we only want the sum of the partial derivatives
y,x.grad
(tensor([0., 1., 4., 9.], grad_fn=<MulBackward0>), tensor([0., 2., 4., 6.]))
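For comparison, the same gradients can be obtained by passing the gradient argument explicitly, as the comment above notes (a minimal sketch reusing x):

x.grad.zero_()
y=x*x
y.backward(gradient=torch.ones(len(x))) #same effect as y.sum().backward()
x.grad #tensor([0., 2., 4., 6.])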
x.grad.zero_()
y=x*x
u=y.detach()#detach y from the graph: u is a copy that keeps y's computed values but carries none of y's history beyond those values
z=u*x

z.sum().backward()
x.grad==u
tensor([True, True, True, True])
x.grad.zero_()
y.sum().backward()
x.grad==2*x
tensor([True, True, True, True])
def f(a):
    b=a*2
    while b.norm()<1000:
        b=b*2
    if b.sum()>0:
        c=b
    else:
        c=100*b
    return c

a=torch.randn(size=(),requires_grad=True)
d=f(a)
d.backward()#note: running backward automatically frees the computation graph; to keep it, pass retain_graph=True to the first backward call, i.e. d.backward(retain_graph=True)

a,d,a.grad==d/a,a.grad
(tensor(0.1050, requires_grad=True),
 tensor(1719.5204, grad_fn=<MulBackward0>),
 tensor(True),
 tensor(16384.))
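To see what retain_graph does, a minimal sketch (assuming f and a as defined above); without retain_graph=True, the second backward call on the same graph would raise an error:

d=f(a)
d.backward(retain_graph=True) #keep the computation graph alive after this pass
a.grad.zero_() #gradients accumulate, so clear them between passes
d.backward() #a second pass now works; the graph is freed here
a.grad==d/a #tensor(True), the same gradient as before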

2.6 Probability

import torch
from torch.distributions import multinomial#multinomial: the multinomial distribution
from d2l import torch as d2l

fair_probs=torch.ones([6])/6
fair_probs,multinomial.Multinomial(1,fair_probs).sample()
(tensor([0.1667, 0.1667, 0.1667, 0.1667, 0.1667, 0.1667]),
 tensor([0., 0., 0., 0., 1., 0.]))
multinomial.Multinomial(10,fair_probs).sample()
tensor([2., 0., 1., 3., 1., 3.])
counts=multinomial.Multinomial(1000,fair_probs).sample()#the result is stored as float32 so we can do division
counts/1000#relative frequency as the estimate
tensor([0.1640, 0.1610, 0.1720, 0.1730, 0.1610, 0.1690])
counts=multinomial.Multinomial(10,fair_probs).sample((500,))
cum_counts=counts.cumsum(dim=0)#cumsum: cumulative sum
estimates=cum_counts/cum_counts.sum(dim=1,keepdims=True)

d2l.set_figsize((6,4.5))
for i in range(6):
    d2l.plt.plot(estimates[:,i].numpy(),label=("P(die="+str(i+1)+")"))
d2l.plt.axhline(y=0.167,color='black',linestyle='dashed')
d2l.plt.gca().set_xlabel('Groups of experiments')
d2l.plt.gca().set_ylabel('Estimated probability')
d2l.plt.legend();


[Figure: estimated probability of each die face converging toward 1/6 over 500 groups of experiments]

2.7 Consulting the Documentation

import torch
print(dir(torch.distributions))
['AbsTransform', 'AffineTransform', 'Bernoulli', 'Beta', 'Binomial', 'CatTransform', 'Categorical', 'Cauchy', 'Chi2', 'ComposeTransform', 'ContinuousBernoulli', 'CorrCholeskyTransform', 'CumulativeDistributionTransform', 'Dirichlet', 'Distribution', 'ExpTransform', 'Exponential', 'ExponentialFamily', 'FisherSnedecor', 'Gamma', 'Geometric', 'Gumbel', 'HalfCauchy', 'HalfNormal', 'Independent', 'IndependentTransform', 'Kumaraswamy', 'LKJCholesky', 'Laplace', 'LogNormal', 'LogisticNormal', 'LowRankMultivariateNormal', 'LowerCholeskyTransform', 'MixtureSameFamily', 'Multinomial', 'MultivariateNormal', 'NegativeBinomial', 'Normal', 'OneHotCategorical', 'OneHotCategoricalStraightThrough', 'Pareto', 'Poisson', 'PowerTransform', 'RelaxedBernoulli', 'RelaxedOneHotCategorical', 'ReshapeTransform', 'SigmoidTransform', 'SoftmaxTransform', 'SoftplusTransform', 'StackTransform', 'StickBreakingTransform', 'StudentT', 'TanhTransform', 'Transform', 'TransformedDistribution', 'Uniform', 'VonMises', 'Weibull', 'Wishart', '__all__', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__path__', '__spec__', 'bernoulli', 'beta', 'biject_to', 'binomial', 'categorical', 'cauchy', 'chi2', 'constraint_registry', 'constraints', 'continuous_bernoulli', 'dirichlet', 'distribution', 'exp_family', 'exponential', 'fishersnedecor', 'gamma', 'geometric', 'gumbel', 'half_cauchy', 'half_normal', 'identity_transform', 'independent', 'kl', 'kl_divergence', 'kumaraswamy', 'laplace', 'lkj_cholesky', 'log_normal', 'logistic_normal', 'lowrank_multivariate_normal', 'mixture_same_family', 'multinomial', 'multivariate_normal', 'negative_binomial', 'normal', 'one_hot_categorical', 'pareto', 'poisson', 'register_kl', 'relaxed_bernoulli', 'relaxed_categorical', 'studentT', 'transform_to', 'transformed_distribution', 'transforms', 'uniform', 'utils', 'von_mises', 'weibull', 'wishart']
help(torch.ones)
Help on built-in function ones in module torch:

ones(...)
    ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
    
    Returns a tensor filled with the scalar value `1`, with the shape defined
    by the variable argument :attr:`size`.
    
    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.
    
    Keyword arguments:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
    
    Example::
    
        >>> torch.ones(2, 3)
        tensor([[ 1.,  1.,  1.],
                [ 1.,  1.,  1.]])
    
        >>> torch.ones(5)
        tensor([ 1.,  1.,  1.,  1.,  1.])

torch.ones(4)
tensor([1., 1., 1., 1.])
?
list?
help(list)
Help on class list in module builtins:

class list(object)
 |  list(iterable=(), /)
 |  
 |  Built-in mutable sequence.
 |  
 |  If no argument is given, the constructor creates a new empty list.
 |  The argument must be an iterable if specified.
 |  
 |  Methods defined here:
 |  
 |  __add__(self, value, /)
 |      Return self+value.
 |  
 |  __contains__(self, key, /)
 |      Return key in self.
 |  
 |  __delitem__(self, key, /)
 |      Delete self[key].
 |  
 |  __eq__(self, value, /)
 |      Return self==value.
 |  
 |  __ge__(self, value, /)
 |      Return self>=value.
 |  
 |  __getattribute__(self, name, /)
 |      Return getattr(self, name).
 |  
 |  __getitem__(...)
 |      x.__getitem__(y) <==> x[y]
 |  
 |  __gt__(self, value, /)
 |      Return self>value.
 |  
 |  __iadd__(self, value, /)
 |      Implement self+=value.
 |  
 |  __imul__(self, value, /)
 |      Implement self*=value.
 |  
 |  __init__(self, /, *args, **kwargs)
 |      Initialize self.  See help(type(self)) for accurate signature.
 |  
 |  __iter__(self, /)
 |      Implement iter(self).
 |  
 |  __le__(self, value, /)
 |      Return self<=value.
 |  
 |  __len__(self, /)
 |      Return len(self).
 |  
 |  __lt__(self, value, /)
 |      Return self<value.
 |  
 |  __mul__(self, value, /)
 |      Return self*value.
 |  
 |  __ne__(self, value, /)
 |      Return self!=value.
 |  
 |  __repr__(self, /)
 |      Return repr(self).
 |  
 |  __reversed__(self, /)
 |      Return a reverse iterator over the list.
 |  
 |  __rmul__(self, value, /)
 |      Return value*self.
 |  
 |  __setitem__(self, key, value, /)
 |      Set self[key] to value.
 |  
 |  __sizeof__(self, /)
 |      Return the size of the list in memory, in bytes.
 |  
 |  append(self, object, /)
 |      Append object to the end of the list.
 |  
 |  clear(self, /)
 |      Remove all items from list.
 |  
 |  copy(self, /)
 |      Return a shallow copy of the list.
 |  
 |  count(self, value, /)
 |      Return number of occurrences of value.
 |  
 |  extend(self, iterable, /)
 |      Extend list by appending elements from the iterable.
 |  
 |  index(self, value, start=0, stop=9223372036854775807, /)
 |      Return first index of value.
 |      
 |      Raises ValueError if the value is not present.
 |  
 |  insert(self, index, object, /)
 |      Insert object before index.
 |  
 |  pop(self, index=-1, /)
 |      Remove and return item at index (default last).
 |      
 |      Raises IndexError if list is empty or index is out of range.
 |  
 |  remove(self, value, /)
 |      Remove first occurrence of value.
 |      
 |      Raises ValueError if the value is not present.
 |  
 |  reverse(self, /)
 |      Reverse *IN PLACE*.
 |  
 |  sort(self, /, *, key=None, reverse=False)
 |      Sort the list in ascending order and return None.
 |      
 |      The sort is in-place (i.e. the list itself is modified) and stable (i.e. the
 |      order of two equal elements is maintained).
 |      
 |      If a key function is given, apply it once to each list item and sort them,
 |      ascending or descending, according to their function values.
 |      
 |      The reverse flag can be set to sort in descending order.
 |  
 |  ----------------------------------------------------------------------
 |  Class methods defined here:
 |  
 |  __class_getitem__(...) from builtins.type
 |      See PEP 585
 |  
 |  ----------------------------------------------------------------------
 |  Static methods defined here:
 |  
 |  __new__(*args, **kwargs) from builtins.type
 |      Create and return a new object.  See help(type) for accurate signature.
 |  
 |  ----------------------------------------------------------------------
 |  Data and other attributes defined here:
 |  
 |  __hash__ = None

list??