Loader error (checkpoint `state_dict` mismatch)

Traceback (most recent call last):

File "/ai/zhdata/lyp/multiyolov5_point_608_736/train_608_736.py", line 718, in <module>

train(hyp, opt, device, tb_writer)

File "/ai/zhdata/lyp/multiyolov5_point_608_736/train_608_736.py", line 166, in train

ema.ema.load_state_dict(ckpt['ema'].float().state_dict())

File "/ai/zhdata/lyp/conda/anaconda3/envs/mmd3.0/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1482, in load_state_dict

raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(

RuntimeError: Error(s) in loading state_dict for Model:

Missing key(s) in state_dict: "model.24.m.0.cv1.conv.weight", "model.24.m.0.cv1.bn.weight", "model.24.m.0.cv1.bn.bias", "model.24.m.0.cv1.bn.running_mean", "model.24.m.0.cv1.bn.running_var", "model.24.m.0.cv2.conv.weight", "model.24.m.0.cv2.bn.weight", "model.24.m.0.cv2.bn.bias", "model.24.m.0.cv2.bn.running_mean", "model.24.m.0.cv2.bn.running_var", "model.24.m.0.cv3.conv.weight", "model.24.m.0.cv3.bn.weight", "model.24.m.0.cv3.bn.bias", "model.24.m.0.cv3.bn.running_mean", "model.24.m.0.cv3.bn.running_var", "model.24.m.0.m.0.cv1.conv.weight", "model.24.m.0.m.0.cv1.bn.weight", "model.24.m.0.m.0.cv1.bn.bias", "model.24.m.0.m.0.cv1.bn.running_mean", "model.24.m.0.m.0.cv1.bn.running_var", "model.24.m.0.m.0.cv2.conv.weight", "model.24.m.0.m.0.cv2.bn.weight", "model.24.m.0.m.0.cv2.bn.bias", "model.24.m.0.m.0.cv2.bn.running_mean", "model.24.m.0.m.0.cv2.bn.running_var", "model.24.m.2.m.0.cv1.conv.weight", "model.24.m.2.m.0.cv1.bn.weight", "model.24.m.2.m.0.cv1.bn.bias", "model.24.m.2.m.0.cv1.bn.running_mean", "model.24.m.2.m.0.cv1.bn.running_var", "model.24.m.2.m.0.cv2.conv.weight", "model.24.m.2.m.0.cv2.bn.weight", "model.24.m.2.m.0.cv2.bn.bias", "model.24.m.2.m.0.cv2.bn.running_mean", "model.24.m.2.m.0.cv2.bn.running_var", "model.24.m.4.cv1.conv.weight", "model.24.m.4.cv1.bn.weight", "model.24.m.4.cv1.bn.bias", "model.24.m.4.cv1.bn.running_mean", "model.24.m.4.cv1.bn.running_var", "model.24.m.4.cv2.conv.weight", "model.24.m.4.cv2.bn.weight", "model.24.m.4.cv2.bn.bias", "model.24.m.4.cv2.bn.running_mean", "model.24.m.4.cv2.bn.running_var", "model.24.m.4.cv3.conv.weight", "model.24.m.4.cv3.bn.weight", "model.24.m.4.cv3.bn.bias", "model.24.m.4.cv3.bn.running_mean", "model.24.m.4.cv3.bn.running_var", "model.24.m.4.m.0.cv1.conv.weight", "model.24.m.4.m.0.cv1.bn.weight", "model.24.m.4.m.0.cv1.bn.bias", "model.24.m.4.m.0.cv1.bn.running_mean", "model.24.m.4.m.0.cv1.bn.running_var", "model.24.m.4.m.0.cv2.conv.weight", "model.24.m.4.m.0.cv2.bn.weight", 
"model.24.m.4.m.0.cv2.bn.bias", "model.24.m.4.m.0.cv2.bn.running_mean", "model.24.m.4.m.0.cv2.bn.running_var", "model.24.m.5.weight", "model.24.m.5.bias", "model.24.decoder1.3.cv1.conv.weight", "model.24.decoder1.3.cv1.bn.weight", "model.24.decoder1.3.cv1.bn.bias", "model.24.decoder1.3.cv1.bn.running_mean", "model.24.decoder1.3.cv1.bn.running_var", "model.24.decoder1.3.cv2.conv.weight", "model.24.decoder1.3.cv2.bn.weight", "model.24.decoder1.3.cv2.bn.bias", "model.24.decoder1.3.cv2.bn.running_mean", "model.24.decoder1.3.cv2.bn.running_var", "model.24.decoder1.3.cv3.conv.weight", "model.24.decoder1.3.cv3.bn.weight", "model.24.decoder1.3.cv3.bn.bias", "model.24.decoder1.3.cv3.bn.running_mean", "model.24.decoder1.3.cv3.bn.running_var", "model.24.decoder1.3.m.0.cv1.conv.weight", "model.24.decoder1.3.m.0.cv1.bn.weight", "model.24.decoder1.3.m.0.cv1.bn.bias", "model.24.decoder1.3.m.0.cv1.bn.running_mean", "model.24.decoder1.3.m.0.cv1.bn.running_var", "model.24.decoder1.3.m.0.cv2.conv.weight", "model.24.decoder1.3.m.0.cv2.bn.weight", "model.24.decoder1.3.m.0.cv2.bn.bias", "model.24.decoder1.3.m.0.cv2.bn.running_mean", "model.24.decoder1.3.m.0.cv2.bn.running_var", "model.24.decoder1.5.cv1.conv.weight", "model.24.decoder1.5.cv1.bn.weight", "model.24.decoder1.5.cv1.bn.bias", "model.24.decoder1.5.cv1.bn.running_mean", "model.24.decoder1.5.cv1.bn.running_var", "model.24.decoder1.5.cv2.conv.weight", "model.24.decoder1.5.cv2.bn.weight", "model.24.decoder1.5.cv2.bn.bias", "model.24.decoder1.5.cv2.bn.running_mean", "model.24.decoder1.5.cv2.bn.running_var", "model.24.decoder1.5.cv3.conv.weight", "model.24.decoder1.5.cv3.bn.weight", "model.24.decoder1.5.cv3.bn.bias", "model.24.decoder1.5.cv3.bn.running_mean", "model.24.decoder1.5.cv3.bn.running_var", "model.24.decoder1.5.m.0.cv1.conv.weight", "model.24.decoder1.5.m.0.cv1.bn.weight", "model.24.decoder1.5.m.0.cv1.bn.bias", "model.24.decoder1.5.m.0.cv1.bn.running_mean", "model.24.decoder1.5.m.0.cv1.bn.running_var", 
"model.24.decoder1.5.m.0.cv2.conv.weight", "model.24.decoder1.5.m.0.cv2.bn.weight", "model.24.decoder1.5.m.0.cv2.bn.bias", "model.24.decoder1.5.m.0.cv2.bn.running_mean", "model.24.decoder1.5.m.0.cv2.bn.running_var", "model.24.decoder1.6.weight", "model.24.decoder1.6.bias", "model.24.m32.2.cv1.conv.weight", "model.24.m32.2.cv1.bn.weight", "model.24.m32.2.cv1.bn.bias", "model.24.m32.2.cv1.bn.running_mean", "model.24.m32.2.cv1.bn.running_var", "model.24.m32.2.cv2.conv.weight", "model.24.m32.2.cv2.bn.weight", "model.24.m32.2.cv2.bn.bias", "model.24.m32.2.cv2.bn.running_mean", "model.24.m32.2.cv2.bn.running_var", "model.24.m32.2.cv3.conv.weight", "model.24.m32.2.cv3.bn.weight", "model.24.m32.2.cv3.bn.bias", "model.24.m32.2.cv3.bn.running_mean", "model.24.m32.2.cv3.bn.running_var", "model.24.m32.2.m.0.cv1.conv.weight", "model.24.m32.2.m.0.cv1.bn.weight", "model.24.m32.2.m.0.cv1.bn.bias", "model.24.m32.2.m.0.cv1.bn.running_mean", "model.24.m32.2.m.0.cv1.bn.running_var", "model.24.m32.2.m.0.cv2.conv.weight", "model.24.m32.2.m.0.cv2.bn.weight", "model.24.m32.2.m.0.cv2.bn.bias", "model.24.m32.2.m.0.cv2.bn.running_mean", "model.24.m32.2.m.0.cv2.bn.running_var", "model.24.m16.0.conv.weight", "model.24.m16.0.bn.weight", "model.24.m16.0.bn.bias", "model.24.m16.0.bn.running_mean", "model.24.m16.0.bn.running_var".

Unexpected key(s) in state_dict: "model.24.m.0.conv.weight", "model.24.m.0.bn.weight", "model.24.m.0.bn.bias", "model.24.m.0.bn.running_mean", "model.24.m.0.bn.running_var", "model.24.m.0.bn.num_batches_tracked", "model.24.m.1.cv1.conv.weight", "model.24.m.1.cv1.bn.weight", "model.24.m.1.cv1.bn.bias", "model.24.m.1.cv1.bn.running_mean", "model.24.m.1.cv1.bn.running_var", "model.24.m.1.cv1.bn.num_batches_tracked", "model.24.m.1.cv2.conv.weight", "model.24.m.1.cv2.bn.weight", "model.24.m.1.cv2.bn.bias", "model.24.m.1.cv2.bn.running_mean", "model.24.m.1.cv2.bn.running_var", "model.24.m.1.cv2.bn.num_batches_tracked", "model.24.m.1.cv3.conv.weight", "model.24.m.1.cv3.bn.weight", "model.24.m.1.cv3.bn.bias", "model.24.m.1.cv3.bn.running_mean", "model.24.m.1.cv3.bn.running_var", "model.24.m.1.cv3.bn.num_batches_tracked", "model.24.m.1.m.0.cv1.conv.weight", "model.24.m.1.m.0.cv1.bn.weight", "model.24.m.1.m.0.cv1.bn.bias", "model.24.m.1.m.0.cv1.bn.running_mean", "model.24.m.1.m.0.cv1.bn.running_var", "model.24.m.1.m.0.cv1.bn.num_batches_tracked", "model.24.m.1.m.0.cv2.conv.weight", "model.24.m.1.m.0.cv2.bn.weight", "model.24.m.1.m.0.cv2.bn.bias", "model.24.m.1.m.0.cv2.bn.running_mean", "model.24.m.1.m.0.cv2.bn.running_var", "model.24.m.1.m.0.cv2.bn.num_batches_tracked", "model.24.m.2.m.cv1.conv.weight", "model.24.m.2.m.cv1.bn.weight", "model.24.m.2.m.cv1.bn.bias", "model.24.m.2.m.cv1.bn.running_mean", "model.24.m.2.m.cv1.bn.running_var", "model.24.m.2.m.cv1.bn.num_batches_tracked", "model.24.m.2.m.cv2.conv.weight", "model.24.m.2.m.cv2.bn.weight", "model.24.m.2.m.cv2.bn.bias", "model.24.m.2.m.cv2.bn.running_mean", "model.24.m.2.m.cv2.bn.running_var", "model.24.m.2.m.cv2.bn.num_batches_tracked", "model.24.m.3.weight", "model.24.m.3.bias", "model.24.decoder1.2.cv1.conv.weight", "model.24.decoder1.2.cv1.bn.weight", "model.24.decoder1.2.cv1.bn.bias", "model.24.decoder1.2.cv1.bn.running_mean", "model.24.decoder1.2.cv1.bn.running_var", 
"model.24.decoder1.2.cv1.bn.num_batches_tracked", "model.24.decoder1.2.cv2.conv.weight", "model.24.decoder1.2.cv2.bn.weight", "model.24.decoder1.2.cv2.bn.bias", "model.24.decoder1.2.cv2.bn.running_mean", "model.24.decoder1.2.cv2.bn.running_var", "model.24.decoder1.2.cv2.bn.num_batches_tracked", "model.24.decoder1.2.cv3.conv.weight", "model.24.decoder1.2.cv3.bn.weight", "model.24.decoder1.2.cv3.bn.bias", "model.24.decoder1.2.cv3.bn.running_mean", "model.24.decoder1.2.cv3.bn.running_var", "model.24.decoder1.2.cv3.bn.num_batches_tracked", "model.24.decoder1.2.m.cv1.conv.weight", "model.24.decoder1.2.m.cv1.bn.weight", "model.24.decoder1.2.m.cv1.bn.bias", "model.24.decoder1.2.m.cv1.bn.running_mean", "model.24.decoder1.2.m.cv1.bn.running_var", "model.24.decoder1.2.m.cv1.bn.num_batches_tracked", "model.24.decoder1.2.m.cv2.conv.weight", "model.24.decoder1.2.m.cv2.bn.weight", "model.24.decoder1.2.m.cv2.bn.bias", "model.24.decoder1.2.m.cv2.bn.running_mean", "model.24.decoder1.2.m.cv2.bn.running_var", "model.24.decoder1.2.m.cv2.bn.num_batches_tracked", "model.24.decoder1.3.weight", "model.24.decoder1.3.bias", "model.24.m8.1.cv1.conv.weight", "model.24.m8.1.cv1.bn.weight", "model.24.m8.1.cv1.bn.bias", "model.24.m8.1.cv1.bn.running_mean", "model.24.m8.1.cv1.bn.running_var", "model.24.m8.1.cv1.bn.num_batches_tracked", "model.24.m8.1.cv2.conv.weight", "model.24.m8.1.cv2.bn.weight", "model.24.m8.1.cv2.bn.bias", "model.24.m8.1.cv2.bn.running_mean", "model.24.m8.1.cv2.bn.running_var", "model.24.m8.1.cv2.bn.num_batches_tracked", "model.24.m8.1.cv3.conv.weight", "model.24.m8.1.cv3.bn.weight", "model.24.m8.1.cv3.bn.bias", "model.24.m8.1.cv3.bn.running_mean", "model.24.m8.1.cv3.bn.running_var", "model.24.m8.1.cv3.bn.num_batches_tracked", "model.24.m8.1.m.0.cv1.conv.weight", "model.24.m8.1.m.0.cv1.bn.weight", "model.24.m8.1.m.0.cv1.bn.bias", "model.24.m8.1.m.0.cv1.bn.running_mean", "model.24.m8.1.m.0.cv1.bn.running_var", "model.24.m8.1.m.0.cv1.bn.num_batches_tracked", 
"model.24.m8.1.m.0.cv2.conv.weight", "model.24.m8.1.m.0.cv2.bn.weight", "model.24.m8.1.m.0.cv2.bn.bias", "model.24.m8.1.m.0.cv2.bn.running_mean", "model.24.m8.1.m.0.cv2.bn.running_var", "model.24.m8.1.m.0.cv2.bn.num_batches_tracked", "model.24.m32.1.cv1.conv.weight", "model.24.m32.1.cv1.bn.weight", "model.24.m32.1.cv1.bn.bias", "model.24.m32.1.cv1.bn.running_mean", "model.24.m32.1.cv1.bn.running_var", "model.24.m32.1.cv1.bn.num_batches_tracked", "model.24.m32.1.cv2.conv.weight", "model.24.m32.1.cv2.bn.weight", "model.24.m32.1.cv2.bn.bias", "model.24.m32.1.cv2.bn.running_mean", "model.24.m32.1.cv2.bn.running_var", "model.24.m32.1.cv2.bn.num_batches_tracked", "model.24.m32.1.cv3.conv.weight", "model.24.m32.1.cv3.bn.weight", "model.24.m32.1.cv3.bn.bias", "model.24.m32.1.cv3.bn.running_mean", "model.24.m32.1.cv3.bn.running_var", "model.24.m32.1.cv3.bn.num_batches_tracked", "model.24.m32.1.m.0.cv1.conv.weight", "model.24.m32.1.m.0.cv1.bn.weight", "model.24.m32.1.m.0.cv1.bn.bias", "model.24.m32.1.m.0.cv1.bn.running_mean", "model.24.m32.1.m.0.cv1.bn.running_var", "model.24.m32.1.m.0.cv1.bn.num_batches_tracked", "model.24.m32.1.m.0.cv2.conv.weight", "model.24.m32.1.m.0.cv2.bn.weight", "model.24.m32.1.m.0.cv2.bn.bias", "model.24.m32.1.m.0.cv2.bn.running_mean", "model.24.m32.1.m.0.cv2.bn.running_var", "model.24.m32.1.m.0.cv2.bn.num_batches_tracked", "model.24.m16.0.cv1.conv.weight", "model.24.m16.0.cv1.bn.weight", "model.24.m16.0.cv1.bn.bias", "model.24.m16.0.cv1.bn.running_mean", "model.24.m16.0.cv1.bn.running_var", "model.24.m16.0.cv1.bn.num_batches_tracked", "model.24.m16.0.cv2.conv.weight", "model.24.m16.0.cv2.bn.weight", "model.24.m16.0.cv2.bn.bias", "model.24.m16.0.cv2.bn.running_mean", "model.24.m16.0.cv2.bn.running_var", "model.24.m16.0.cv2.bn.num_batches_tracked", "model.24.m16.0.cv3.conv.weight", "model.24.m16.0.cv3.bn.weight", "model.24.m16.0.cv3.bn.bias", "model.24.m16.0.cv3.bn.running_mean", "model.24.m16.0.cv3.bn.running_var", 
"model.24.m16.0.cv3.bn.num_batches_tracked", "model.24.m16.0.m.0.cv1.conv.weight", "model.24.m16.0.m.0.cv1.bn.weight", "model.24.m16.0.m.0.cv1.bn.bias", "model.24.m16.0.m.0.cv1.bn.running_mean", "model.24.m16.0.m.0.cv1.bn.running_var", "model.24.m16.0.m.0.cv1.bn.num_batches_tracked", "model.24.m16.0.m.0.cv2.conv.weight", "model.24.m16.0.m.0.cv2.bn.weight", "model.24.m16.0.m.0.cv2.bn.bias", "model.24.m16.0.m.0.cv2.bn.running_mean", "model.24.m16.0.m.0.cv2.bn.running_var", "model.24.m16.0.m.0.cv2.bn.num_batches_tracked".

size mismatch for model.24.m.2.cv1.conv.weight: copying a param with shape torch.Size([128, 256, 1, 1]) from checkpoint, the shape in current model is torch.Size([32, 128, 1, 1]).

size mismatch for model.24.m.2.cv1.bn.weight: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv1.bn.bias: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv1.bn.running_mean: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv1.bn.running_var: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv2.conv.weight: copying a param with shape torch.Size([128, 256, 1, 1]) from checkpoint, the shape in current model is torch.Size([32, 128, 1, 1]).

size mismatch for model.24.m.2.cv2.bn.weight: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv2.bn.bias: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv2.bn.running_mean: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv2.bn.running_var: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv3.conv.weight: copying a param with shape torch.Size([256, 320, 1, 1]) from checkpoint, the shape in current model is torch.Size([64, 64, 1, 1]).

size mismatch for model.24.m.2.cv3.bn.weight: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.m.2.cv3.bn.bias: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.m.2.cv3.bn.running_mean: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.m.2.cv3.bn.running_var: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv1.conv.weight: copying a param with shape torch.Size([128, 256, 1, 1]) from checkpoint, the shape in current model is torch.Size([64, 256, 1, 1]).

size mismatch for model.24.decoder1.1.cv1.bn.weight: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv1.bn.bias: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv1.bn.running_mean: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv1.bn.running_var: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv2.conv.weight: copying a param with shape torch.Size([128, 256, 1, 1]) from checkpoint, the shape in current model is torch.Size([64, 256, 1, 1]).

size mismatch for model.24.decoder1.1.cv2.bn.weight: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv2.bn.bias: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv2.bn.running_mean: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv2.bn.running_var: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv3.conv.weight: copying a param with shape torch.Size([256, 256, 1, 1]) from checkpoint, the shape in current model is torch.Size([128, 128, 1, 1]).

size mismatch for model.24.decoder1.1.cv3.bn.weight: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([128]).

size mismatch for model.24.decoder1.1.cv3.bn.bias: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([128]).

size mismatch for model.24.decoder1.1.cv3.bn.running_mean: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([128]).

size mismatch for model.24.decoder1.1.cv3.bn.running_var: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([128]).

size mismatch for model.24.decoder1.1.m.0.cv1.conv.weight: copying a param with shape torch.Size([128, 128, 1, 1]) from checkpoint, the shape in current model is torch.Size([64, 64, 1, 1]).

size mismatch for model.24.decoder1.1.m.0.cv1.bn.weight: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv1.bn.bias: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv1.bn.running_mean: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv1.bn.running_var: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv2.conv.weight: copying a param with shape torch.Size([128, 128, 3, 3]) from checkpoint, the shape in current model is torch.Size([64, 64, 3, 3]).

size mismatch for model.24.decoder1.1.m.0.cv2.bn.weight: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv2.bn.bias: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv2.bn.running_mean: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv2.bn.running_var: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

相关推荐
AI白艿几秒前
男装市场稳健增长?AI助力精准把握消费新趋势
人工智能·aigc
5G全域通几秒前
工信部2026年短信业务合规申请全流程官方指南(1月1日强制生效)
大数据·网络·人工智能·信息与通信·时序数据库
木卫四科技几秒前
【CES 2026】木卫四科技携“合规全生命周期”汽车网络安全方案亮相 CES 2026
人工智能·木卫四科技·ces2026·智能汽车安全
爱思德学术2 分钟前
中国计算机学会(CCF)推荐学术会议-B(交叉/综合/新兴):CogSci 2026
人工智能·神经网络·认知科学
好奇龙猫4 分钟前
【人工智能学习-AI-MIT公开课第 16 讲:支持向量机(SVM)】
人工智能·学习·支持向量机
环黄金线HHJX.4 分钟前
【MCP: Tuan编程 + Qt架构 + QoS - 量子-经典混合计算管理控制平台】
ide·人工智能·qt·编辑器·量子计算
duyinbi75176 分钟前
【环境监测】河流目标检测:建筑物、树木和水轮机识别实战
人工智能·目标检测·计算机视觉
cute_ming7 分钟前
浅谈提示词工程:企业级系统化实践与自动化架构(三)
人工智能·ubuntu·机器学习·架构·自动化
PHOSKEY9 分钟前
QM系列闪测仪在医用核心原材料(TC4/PEEK/ 硅胶)质控方案
人工智能·机器学习
WJSKad123511 分钟前
【狮子目标检测】YOLO13-GhostDynamicConv改进模型实现与性能分析
人工智能·目标检测·计算机视觉