DAY 48: Random Functions and the Broadcasting Mechanism

Knowledge review:

  1. Generating random tensors: the torch.randn function
  2. Output-size formulas for convolution and pooling (no need to memorize them, since frameworks compute the sizes automatically; a formula check sketch follows the notes below)
  3. PyTorch's broadcasting mechanism: broadcasting in addition and multiplication

P.S.: NumPy operations have a similar broadcasting mechanism that behaves essentially the same way.

Homework: ask an AI assistant for a few more examples on your own to consolidate your understanding.
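
The convolution/pooling output-size formula from point 2 is out = floor((in + 2*padding - kernel) / stride) + 1. The sketch below is a minimal check of that formula against torch.nn.Conv2d and nn.MaxPool2d; the input size (32) and the layer parameters (kernel_size=3, stride=2, padding=1) are arbitrary choices for illustration, not values from the original notes.

import torch
import torch.nn as nn

def conv_out_size(n, kernel, stride=1, padding=0):
    # Standard output-size formula: floor((n + 2*padding - kernel) / stride) + 1
    return (n + 2 * padding - kernel) // stride + 1

x = torch.randn(1, 3, 32, 32)  # (batch, channels, height, width)
conv = nn.Conv2d(3, 8, kernel_size=3, stride=2, padding=1)
pool = nn.MaxPool2d(kernel_size=2, stride=2)

print(conv(x).shape)                                     # torch.Size([1, 8, 16, 16])
print(conv_out_size(32, kernel=3, stride=2, padding=1))  # 16
print(pool(conv(x)).shape)                               # torch.Size([1, 8, 8, 8])
print(conv_out_size(16, kernel=2, stride=2))             # 8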

import torch
import numpy as np

# ================ Random tensor generation: torch.randn ================
print("\n===== Random tensor generation examples =====")

# Basic usage: generate a tensor with a given shape
tensor = torch.randn(3, 4)
print("Random tensor of shape (3, 4):\n", tensor)

# Higher-dimensional tensors: add a leading batch dimension
batch_tensor = torch.randn(2, 3, 4)
print("Random tensor of shape (2, 3, 4):\n", batch_tensor)

# Device placement: create the tensor directly on the GPU when available
if torch.cuda.is_available():
    gpu_tensor = torch.randn(2, 2, device='cuda')
    print("Random tensor on the GPU:\n", gpu_tensor)

# Generate a tensor with a specified mean and standard deviation
custom_tensor = 5 + 2 * torch.randn(3, 3)
print("Random tensor with a custom distribution (mean 5, std 2):\n", custom_tensor)

# Fix the random seed (for reproducible experiments)
torch.manual_seed(42)
fixed_tensor = torch.randn(2, 2)
print("Tensor generated with a fixed random seed:\n", fixed_tensor)


# ================ PyTorch broadcasting: addition and multiplication ================
print("\n===== Broadcasting examples =====")

# Example 1: 1-D tensor addition
a = torch.tensor([1, 2, 3])  # shape (3,)
b = torch.tensor([4])        # shape (1,)
c = a + b                    # equivalent to a + [4, 4, 4]
print("1-D addition broadcast result:", c)  # output: tensor([5, 6, 7])

# Example 2: 2-D tensor multiplication (matrix times vector)
matrix = torch.randn(3, 4)
vector = torch.randn(4)
result = matrix * vector  # each row of the matrix is multiplied element-wise by vector
print("2-D multiplication broadcast result shape:", result.shape)  # output: torch.Size([3, 4])

# Example 3: higher-dimensional broadcasting (3-D + 2-D)
tensor1 = torch.randn(2, 1, 4)
tensor2 = torch.randn(3, 4)
result = tensor1 + tensor2  # broadcast result shape: (2, 3, 4)
print("3-D + 2-D broadcast result shape:", result.shape)  # output: torch.Size([2, 3, 4])

# Multi-dimensional tensor plus a scalar
tensor = torch.randn(2, 3, 4)
result = tensor + 10  # the scalar 10 is broadcast to the shape of tensor
print("Tensor + scalar result shape:", result.shape)  # output: torch.Size([2, 3, 4])

# Broadcasting applied to normalization
data = torch.randn(100, 5)
means = data.mean(dim=0)  # shape (5,)
stds = data.std(dim=0)    # shape (5,)
normalized_data = (data - means) / stds  # (5,) is broadcast to (100, 5)
print("Normalized data shape:", normalized_data.shape)  # output: torch.Size([100, 5])


# ================ Broadcasting consistency with NumPy ================
print("\n===== NumPy vs. PyTorch broadcasting =====")

# Verify that NumPy and PyTorch broadcast the same way
np_a = np.random.randn(3, 1)  # shape (3, 1)
np_b = np.random.randn(1, 4)  # shape (1, 4)
np_result = np_a + np_b       # result shape (3, 4)

torch_a = torch.from_numpy(np_a)
torch_b = torch.from_numpy(np_b)
torch_result = torch_a + torch_b

print("Numpy结果形状:", np_result.shape)      # 输出:(3, 4)
print("PyTorch结果形状:", torch_result.shape)  # 输出:torch.Size([3, 4])


# ================ Broadcasting failure case ================
print("\n===== Broadcasting error example =====")

# Failure case: incompatible shapes make broadcasting fail
tensor_a = torch.randn(3, 4)
tensor_b = torch.randn(2, 3)

try:
    result = tensor_a + tensor_b
except RuntimeError as e:
    print(f"Broadcast error: {e}")
    # Output: The size of tensor a (4) must match the size of tensor b (3) at non-singleton dimension 1
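
# Added note: (3, 4) and (2, 3) have no valid broadcast, so there is nothing to
# "fix" directly. When the intent is to combine a matrix with one value per row,
# the usual remedy is to insert a singleton dimension; the per-row vector below
# is a hypothetical example, not data from the original notes.
row_offset = torch.randn(3)
fixed = tensor_a + row_offset.unsqueeze(1)  # (3, 4) + (3, 1) -> (3, 4)
print("Shape after unsqueeze-based broadcast:", fixed.shape)  # torch.Size([3, 4])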

@浙大疏锦行
