load error

Traceback (most recent call last):

File "/ai/zhdata/lyp/multiyolov5_point_608_736/train_608_736.py", line 718, in <module>

train(hyp, opt, device, tb_writer)

File "/ai/zhdata/lyp/multiyolov5_point_608_736/train_608_736.py", line 166, in train

ema.ema.load_state_dict(ckpt['ema'].float().state_dict())

File "/ai/zhdata/lyp/conda/anaconda3/envs/mmd3.0/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1482, in load_state_dict

raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(

RuntimeError: Error(s) in loading state_dict for Model:

Missing key(s) in state_dict: "model.24.m.0.cv1.conv.weight", "model.24.m.0.cv1.bn.weight", "model.24.m.0.cv1.bn.bias", "model.24.m.0.cv1.bn.running_mean", "model.24.m.0.cv1.bn.running_var", "model.24.m.0.cv2.conv.weight", "model.24.m.0.cv2.bn.weight", "model.24.m.0.cv2.bn.bias", "model.24.m.0.cv2.bn.running_mean", "model.24.m.0.cv2.bn.running_var", "model.24.m.0.cv3.conv.weight", "model.24.m.0.cv3.bn.weight", "model.24.m.0.cv3.bn.bias", "model.24.m.0.cv3.bn.running_mean", "model.24.m.0.cv3.bn.running_var", "model.24.m.0.m.0.cv1.conv.weight", "model.24.m.0.m.0.cv1.bn.weight", "model.24.m.0.m.0.cv1.bn.bias", "model.24.m.0.m.0.cv1.bn.running_mean", "model.24.m.0.m.0.cv1.bn.running_var", "model.24.m.0.m.0.cv2.conv.weight", "model.24.m.0.m.0.cv2.bn.weight", "model.24.m.0.m.0.cv2.bn.bias", "model.24.m.0.m.0.cv2.bn.running_mean", "model.24.m.0.m.0.cv2.bn.running_var", "model.24.m.2.m.0.cv1.conv.weight", "model.24.m.2.m.0.cv1.bn.weight", "model.24.m.2.m.0.cv1.bn.bias", "model.24.m.2.m.0.cv1.bn.running_mean", "model.24.m.2.m.0.cv1.bn.running_var", "model.24.m.2.m.0.cv2.conv.weight", "model.24.m.2.m.0.cv2.bn.weight", "model.24.m.2.m.0.cv2.bn.bias", "model.24.m.2.m.0.cv2.bn.running_mean", "model.24.m.2.m.0.cv2.bn.running_var", "model.24.m.4.cv1.conv.weight", "model.24.m.4.cv1.bn.weight", "model.24.m.4.cv1.bn.bias", "model.24.m.4.cv1.bn.running_mean", "model.24.m.4.cv1.bn.running_var", "model.24.m.4.cv2.conv.weight", "model.24.m.4.cv2.bn.weight", "model.24.m.4.cv2.bn.bias", "model.24.m.4.cv2.bn.running_mean", "model.24.m.4.cv2.bn.running_var", "model.24.m.4.cv3.conv.weight", "model.24.m.4.cv3.bn.weight", "model.24.m.4.cv3.bn.bias", "model.24.m.4.cv3.bn.running_mean", "model.24.m.4.cv3.bn.running_var", "model.24.m.4.m.0.cv1.conv.weight", "model.24.m.4.m.0.cv1.bn.weight", "model.24.m.4.m.0.cv1.bn.bias", "model.24.m.4.m.0.cv1.bn.running_mean", "model.24.m.4.m.0.cv1.bn.running_var", "model.24.m.4.m.0.cv2.conv.weight", "model.24.m.4.m.0.cv2.bn.weight", 
"model.24.m.4.m.0.cv2.bn.bias", "model.24.m.4.m.0.cv2.bn.running_mean", "model.24.m.4.m.0.cv2.bn.running_var", "model.24.m.5.weight", "model.24.m.5.bias", "model.24.decoder1.3.cv1.conv.weight", "model.24.decoder1.3.cv1.bn.weight", "model.24.decoder1.3.cv1.bn.bias", "model.24.decoder1.3.cv1.bn.running_mean", "model.24.decoder1.3.cv1.bn.running_var", "model.24.decoder1.3.cv2.conv.weight", "model.24.decoder1.3.cv2.bn.weight", "model.24.decoder1.3.cv2.bn.bias", "model.24.decoder1.3.cv2.bn.running_mean", "model.24.decoder1.3.cv2.bn.running_var", "model.24.decoder1.3.cv3.conv.weight", "model.24.decoder1.3.cv3.bn.weight", "model.24.decoder1.3.cv3.bn.bias", "model.24.decoder1.3.cv3.bn.running_mean", "model.24.decoder1.3.cv3.bn.running_var", "model.24.decoder1.3.m.0.cv1.conv.weight", "model.24.decoder1.3.m.0.cv1.bn.weight", "model.24.decoder1.3.m.0.cv1.bn.bias", "model.24.decoder1.3.m.0.cv1.bn.running_mean", "model.24.decoder1.3.m.0.cv1.bn.running_var", "model.24.decoder1.3.m.0.cv2.conv.weight", "model.24.decoder1.3.m.0.cv2.bn.weight", "model.24.decoder1.3.m.0.cv2.bn.bias", "model.24.decoder1.3.m.0.cv2.bn.running_mean", "model.24.decoder1.3.m.0.cv2.bn.running_var", "model.24.decoder1.5.cv1.conv.weight", "model.24.decoder1.5.cv1.bn.weight", "model.24.decoder1.5.cv1.bn.bias", "model.24.decoder1.5.cv1.bn.running_mean", "model.24.decoder1.5.cv1.bn.running_var", "model.24.decoder1.5.cv2.conv.weight", "model.24.decoder1.5.cv2.bn.weight", "model.24.decoder1.5.cv2.bn.bias", "model.24.decoder1.5.cv2.bn.running_mean", "model.24.decoder1.5.cv2.bn.running_var", "model.24.decoder1.5.cv3.conv.weight", "model.24.decoder1.5.cv3.bn.weight", "model.24.decoder1.5.cv3.bn.bias", "model.24.decoder1.5.cv3.bn.running_mean", "model.24.decoder1.5.cv3.bn.running_var", "model.24.decoder1.5.m.0.cv1.conv.weight", "model.24.decoder1.5.m.0.cv1.bn.weight", "model.24.decoder1.5.m.0.cv1.bn.bias", "model.24.decoder1.5.m.0.cv1.bn.running_mean", "model.24.decoder1.5.m.0.cv1.bn.running_var", 
"model.24.decoder1.5.m.0.cv2.conv.weight", "model.24.decoder1.5.m.0.cv2.bn.weight", "model.24.decoder1.5.m.0.cv2.bn.bias", "model.24.decoder1.5.m.0.cv2.bn.running_mean", "model.24.decoder1.5.m.0.cv2.bn.running_var", "model.24.decoder1.6.weight", "model.24.decoder1.6.bias", "model.24.m32.2.cv1.conv.weight", "model.24.m32.2.cv1.bn.weight", "model.24.m32.2.cv1.bn.bias", "model.24.m32.2.cv1.bn.running_mean", "model.24.m32.2.cv1.bn.running_var", "model.24.m32.2.cv2.conv.weight", "model.24.m32.2.cv2.bn.weight", "model.24.m32.2.cv2.bn.bias", "model.24.m32.2.cv2.bn.running_mean", "model.24.m32.2.cv2.bn.running_var", "model.24.m32.2.cv3.conv.weight", "model.24.m32.2.cv3.bn.weight", "model.24.m32.2.cv3.bn.bias", "model.24.m32.2.cv3.bn.running_mean", "model.24.m32.2.cv3.bn.running_var", "model.24.m32.2.m.0.cv1.conv.weight", "model.24.m32.2.m.0.cv1.bn.weight", "model.24.m32.2.m.0.cv1.bn.bias", "model.24.m32.2.m.0.cv1.bn.running_mean", "model.24.m32.2.m.0.cv1.bn.running_var", "model.24.m32.2.m.0.cv2.conv.weight", "model.24.m32.2.m.0.cv2.bn.weight", "model.24.m32.2.m.0.cv2.bn.bias", "model.24.m32.2.m.0.cv2.bn.running_mean", "model.24.m32.2.m.0.cv2.bn.running_var", "model.24.m16.0.conv.weight", "model.24.m16.0.bn.weight", "model.24.m16.0.bn.bias", "model.24.m16.0.bn.running_mean", "model.24.m16.0.bn.running_var".

Unexpected key(s) in state_dict: "model.24.m.0.conv.weight", "model.24.m.0.bn.weight", "model.24.m.0.bn.bias", "model.24.m.0.bn.running_mean", "model.24.m.0.bn.running_var", "model.24.m.0.bn.num_batches_tracked", "model.24.m.1.cv1.conv.weight", "model.24.m.1.cv1.bn.weight", "model.24.m.1.cv1.bn.bias", "model.24.m.1.cv1.bn.running_mean", "model.24.m.1.cv1.bn.running_var", "model.24.m.1.cv1.bn.num_batches_tracked", "model.24.m.1.cv2.conv.weight", "model.24.m.1.cv2.bn.weight", "model.24.m.1.cv2.bn.bias", "model.24.m.1.cv2.bn.running_mean", "model.24.m.1.cv2.bn.running_var", "model.24.m.1.cv2.bn.num_batches_tracked", "model.24.m.1.cv3.conv.weight", "model.24.m.1.cv3.bn.weight", "model.24.m.1.cv3.bn.bias", "model.24.m.1.cv3.bn.running_mean", "model.24.m.1.cv3.bn.running_var", "model.24.m.1.cv3.bn.num_batches_tracked", "model.24.m.1.m.0.cv1.conv.weight", "model.24.m.1.m.0.cv1.bn.weight", "model.24.m.1.m.0.cv1.bn.bias", "model.24.m.1.m.0.cv1.bn.running_mean", "model.24.m.1.m.0.cv1.bn.running_var", "model.24.m.1.m.0.cv1.bn.num_batches_tracked", "model.24.m.1.m.0.cv2.conv.weight", "model.24.m.1.m.0.cv2.bn.weight", "model.24.m.1.m.0.cv2.bn.bias", "model.24.m.1.m.0.cv2.bn.running_mean", "model.24.m.1.m.0.cv2.bn.running_var", "model.24.m.1.m.0.cv2.bn.num_batches_tracked", "model.24.m.2.m.cv1.conv.weight", "model.24.m.2.m.cv1.bn.weight", "model.24.m.2.m.cv1.bn.bias", "model.24.m.2.m.cv1.bn.running_mean", "model.24.m.2.m.cv1.bn.running_var", "model.24.m.2.m.cv1.bn.num_batches_tracked", "model.24.m.2.m.cv2.conv.weight", "model.24.m.2.m.cv2.bn.weight", "model.24.m.2.m.cv2.bn.bias", "model.24.m.2.m.cv2.bn.running_mean", "model.24.m.2.m.cv2.bn.running_var", "model.24.m.2.m.cv2.bn.num_batches_tracked", "model.24.m.3.weight", "model.24.m.3.bias", "model.24.decoder1.2.cv1.conv.weight", "model.24.decoder1.2.cv1.bn.weight", "model.24.decoder1.2.cv1.bn.bias", "model.24.decoder1.2.cv1.bn.running_mean", "model.24.decoder1.2.cv1.bn.running_var", 
"model.24.decoder1.2.cv1.bn.num_batches_tracked", "model.24.decoder1.2.cv2.conv.weight", "model.24.decoder1.2.cv2.bn.weight", "model.24.decoder1.2.cv2.bn.bias", "model.24.decoder1.2.cv2.bn.running_mean", "model.24.decoder1.2.cv2.bn.running_var", "model.24.decoder1.2.cv2.bn.num_batches_tracked", "model.24.decoder1.2.cv3.conv.weight", "model.24.decoder1.2.cv3.bn.weight", "model.24.decoder1.2.cv3.bn.bias", "model.24.decoder1.2.cv3.bn.running_mean", "model.24.decoder1.2.cv3.bn.running_var", "model.24.decoder1.2.cv3.bn.num_batches_tracked", "model.24.decoder1.2.m.cv1.conv.weight", "model.24.decoder1.2.m.cv1.bn.weight", "model.24.decoder1.2.m.cv1.bn.bias", "model.24.decoder1.2.m.cv1.bn.running_mean", "model.24.decoder1.2.m.cv1.bn.running_var", "model.24.decoder1.2.m.cv1.bn.num_batches_tracked", "model.24.decoder1.2.m.cv2.conv.weight", "model.24.decoder1.2.m.cv2.bn.weight", "model.24.decoder1.2.m.cv2.bn.bias", "model.24.decoder1.2.m.cv2.bn.running_mean", "model.24.decoder1.2.m.cv2.bn.running_var", "model.24.decoder1.2.m.cv2.bn.num_batches_tracked", "model.24.decoder1.3.weight", "model.24.decoder1.3.bias", "model.24.m8.1.cv1.conv.weight", "model.24.m8.1.cv1.bn.weight", "model.24.m8.1.cv1.bn.bias", "model.24.m8.1.cv1.bn.running_mean", "model.24.m8.1.cv1.bn.running_var", "model.24.m8.1.cv1.bn.num_batches_tracked", "model.24.m8.1.cv2.conv.weight", "model.24.m8.1.cv2.bn.weight", "model.24.m8.1.cv2.bn.bias", "model.24.m8.1.cv2.bn.running_mean", "model.24.m8.1.cv2.bn.running_var", "model.24.m8.1.cv2.bn.num_batches_tracked", "model.24.m8.1.cv3.conv.weight", "model.24.m8.1.cv3.bn.weight", "model.24.m8.1.cv3.bn.bias", "model.24.m8.1.cv3.bn.running_mean", "model.24.m8.1.cv3.bn.running_var", "model.24.m8.1.cv3.bn.num_batches_tracked", "model.24.m8.1.m.0.cv1.conv.weight", "model.24.m8.1.m.0.cv1.bn.weight", "model.24.m8.1.m.0.cv1.bn.bias", "model.24.m8.1.m.0.cv1.bn.running_mean", "model.24.m8.1.m.0.cv1.bn.running_var", "model.24.m8.1.m.0.cv1.bn.num_batches_tracked", 
"model.24.m8.1.m.0.cv2.conv.weight", "model.24.m8.1.m.0.cv2.bn.weight", "model.24.m8.1.m.0.cv2.bn.bias", "model.24.m8.1.m.0.cv2.bn.running_mean", "model.24.m8.1.m.0.cv2.bn.running_var", "model.24.m8.1.m.0.cv2.bn.num_batches_tracked", "model.24.m32.1.cv1.conv.weight", "model.24.m32.1.cv1.bn.weight", "model.24.m32.1.cv1.bn.bias", "model.24.m32.1.cv1.bn.running_mean", "model.24.m32.1.cv1.bn.running_var", "model.24.m32.1.cv1.bn.num_batches_tracked", "model.24.m32.1.cv2.conv.weight", "model.24.m32.1.cv2.bn.weight", "model.24.m32.1.cv2.bn.bias", "model.24.m32.1.cv2.bn.running_mean", "model.24.m32.1.cv2.bn.running_var", "model.24.m32.1.cv2.bn.num_batches_tracked", "model.24.m32.1.cv3.conv.weight", "model.24.m32.1.cv3.bn.weight", "model.24.m32.1.cv3.bn.bias", "model.24.m32.1.cv3.bn.running_mean", "model.24.m32.1.cv3.bn.running_var", "model.24.m32.1.cv3.bn.num_batches_tracked", "model.24.m32.1.m.0.cv1.conv.weight", "model.24.m32.1.m.0.cv1.bn.weight", "model.24.m32.1.m.0.cv1.bn.bias", "model.24.m32.1.m.0.cv1.bn.running_mean", "model.24.m32.1.m.0.cv1.bn.running_var", "model.24.m32.1.m.0.cv1.bn.num_batches_tracked", "model.24.m32.1.m.0.cv2.conv.weight", "model.24.m32.1.m.0.cv2.bn.weight", "model.24.m32.1.m.0.cv2.bn.bias", "model.24.m32.1.m.0.cv2.bn.running_mean", "model.24.m32.1.m.0.cv2.bn.running_var", "model.24.m32.1.m.0.cv2.bn.num_batches_tracked", "model.24.m16.0.cv1.conv.weight", "model.24.m16.0.cv1.bn.weight", "model.24.m16.0.cv1.bn.bias", "model.24.m16.0.cv1.bn.running_mean", "model.24.m16.0.cv1.bn.running_var", "model.24.m16.0.cv1.bn.num_batches_tracked", "model.24.m16.0.cv2.conv.weight", "model.24.m16.0.cv2.bn.weight", "model.24.m16.0.cv2.bn.bias", "model.24.m16.0.cv2.bn.running_mean", "model.24.m16.0.cv2.bn.running_var", "model.24.m16.0.cv2.bn.num_batches_tracked", "model.24.m16.0.cv3.conv.weight", "model.24.m16.0.cv3.bn.weight", "model.24.m16.0.cv3.bn.bias", "model.24.m16.0.cv3.bn.running_mean", "model.24.m16.0.cv3.bn.running_var", 
"model.24.m16.0.cv3.bn.num_batches_tracked", "model.24.m16.0.m.0.cv1.conv.weight", "model.24.m16.0.m.0.cv1.bn.weight", "model.24.m16.0.m.0.cv1.bn.bias", "model.24.m16.0.m.0.cv1.bn.running_mean", "model.24.m16.0.m.0.cv1.bn.running_var", "model.24.m16.0.m.0.cv1.bn.num_batches_tracked", "model.24.m16.0.m.0.cv2.conv.weight", "model.24.m16.0.m.0.cv2.bn.weight", "model.24.m16.0.m.0.cv2.bn.bias", "model.24.m16.0.m.0.cv2.bn.running_mean", "model.24.m16.0.m.0.cv2.bn.running_var", "model.24.m16.0.m.0.cv2.bn.num_batches_tracked".

size mismatch for model.24.m.2.cv1.conv.weight: copying a param with shape torch.Size([128, 256, 1, 1]) from checkpoint, the shape in current model is torch.Size([32, 128, 1, 1]).

size mismatch for model.24.m.2.cv1.bn.weight: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv1.bn.bias: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv1.bn.running_mean: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv1.bn.running_var: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv2.conv.weight: copying a param with shape torch.Size([128, 256, 1, 1]) from checkpoint, the shape in current model is torch.Size([32, 128, 1, 1]).

size mismatch for model.24.m.2.cv2.bn.weight: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv2.bn.bias: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv2.bn.running_mean: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv2.bn.running_var: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv3.conv.weight: copying a param with shape torch.Size([256, 320, 1, 1]) from checkpoint, the shape in current model is torch.Size([64, 64, 1, 1]).

size mismatch for model.24.m.2.cv3.bn.weight: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.m.2.cv3.bn.bias: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.m.2.cv3.bn.running_mean: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.m.2.cv3.bn.running_var: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv1.conv.weight: copying a param with shape torch.Size([128, 256, 1, 1]) from checkpoint, the shape in current model is torch.Size([64, 256, 1, 1]).

size mismatch for model.24.decoder1.1.cv1.bn.weight: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv1.bn.bias: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv1.bn.running_mean: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv1.bn.running_var: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv2.conv.weight: copying a param with shape torch.Size([128, 256, 1, 1]) from checkpoint, the shape in current model is torch.Size([64, 256, 1, 1]).

size mismatch for model.24.decoder1.1.cv2.bn.weight: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv2.bn.bias: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv2.bn.running_mean: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv2.bn.running_var: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv3.conv.weight: copying a param with shape torch.Size([256, 256, 1, 1]) from checkpoint, the shape in current model is torch.Size([128, 128, 1, 1]).

size mismatch for model.24.decoder1.1.cv3.bn.weight: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([128]).

size mismatch for model.24.decoder1.1.cv3.bn.bias: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([128]).

size mismatch for model.24.decoder1.1.cv3.bn.running_mean: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([128]).

size mismatch for model.24.decoder1.1.cv3.bn.running_var: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([128]).

size mismatch for model.24.decoder1.1.m.0.cv1.conv.weight: copying a param with shape torch.Size([128, 128, 1, 1]) from checkpoint, the shape in current model is torch.Size([64, 64, 1, 1]).

size mismatch for model.24.decoder1.1.m.0.cv1.bn.weight: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv1.bn.bias: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv1.bn.running_mean: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv1.bn.running_var: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv2.conv.weight: copying a param with shape torch.Size([128, 128, 3, 3]) from checkpoint, the shape in current model is torch.Size([64, 64, 3, 3]).

size mismatch for model.24.decoder1.1.m.0.cv2.bn.weight: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv2.bn.bias: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv2.bn.running_mean: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv2.bn.running_var: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

相关推荐
晚烛1 天前
CANN + 物理信息神经网络(PINNs):求解偏微分方程的新范式
javascript·人工智能·flutter·html·零售
爱吃烤鸡翅的酸菜鱼1 天前
CANN ops-math向量运算与特殊函数实现解析
人工智能·aigc
波动几何1 天前
OpenClaw 构建指南:打造智能多工具编排运行时框架
人工智能
程序猿追1 天前
深度解码AI之魂:CANN Compiler 核心架构与技术演进
人工智能·架构
新缸中之脑1 天前
Figma Make 提示工程
人工智能·figma
赫尔·普莱蒂科萨·帕塔1 天前
智能体工程
人工智能·机器人·软件工程·agi
觉醒大王1 天前
AI写的青基中了
人工智能·笔记·深度学习·学习·职场和发展·学习方法
深鱼~1 天前
深度剖析ops-transformer:LayerNorm与GEMM的融合优化
人工智能·深度学习·transformer
哈__1 天前
CANN图优化技术:深度学习模型的编译器魔法
人工智能·深度学习
灰灰勇闯IT1 天前
神经网络的基石——深度解析 CANN ops-nn 算子库如何赋能昇腾 AI
人工智能·深度学习·神经网络