load error

Traceback (most recent call last):

File "/ai/zhdata/lyp/multiyolov5_point_608_736/train_608_736.py", line 718, in <module>

train(hyp, opt, device, tb_writer)

File "/ai/zhdata/lyp/multiyolov5_point_608_736/train_608_736.py", line 166, in train

ema.ema.load_state_dict(ckpt['ema'].float().state_dict())

File "/ai/zhdata/lyp/conda/anaconda3/envs/mmd3.0/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1482, in load_state_dict

raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(

RuntimeError: Error(s) in loading state_dict for Model:

Missing key(s) in state_dict: "model.24.m.0.cv1.conv.weight", "model.24.m.0.cv1.bn.weight", "model.24.m.0.cv1.bn.bias", "model.24.m.0.cv1.bn.running_mean", "model.24.m.0.cv1.bn.running_var", "model.24.m.0.cv2.conv.weight", "model.24.m.0.cv2.bn.weight", "model.24.m.0.cv2.bn.bias", "model.24.m.0.cv2.bn.running_mean", "model.24.m.0.cv2.bn.running_var", "model.24.m.0.cv3.conv.weight", "model.24.m.0.cv3.bn.weight", "model.24.m.0.cv3.bn.bias", "model.24.m.0.cv3.bn.running_mean", "model.24.m.0.cv3.bn.running_var", "model.24.m.0.m.0.cv1.conv.weight", "model.24.m.0.m.0.cv1.bn.weight", "model.24.m.0.m.0.cv1.bn.bias", "model.24.m.0.m.0.cv1.bn.running_mean", "model.24.m.0.m.0.cv1.bn.running_var", "model.24.m.0.m.0.cv2.conv.weight", "model.24.m.0.m.0.cv2.bn.weight", "model.24.m.0.m.0.cv2.bn.bias", "model.24.m.0.m.0.cv2.bn.running_mean", "model.24.m.0.m.0.cv2.bn.running_var", "model.24.m.2.m.0.cv1.conv.weight", "model.24.m.2.m.0.cv1.bn.weight", "model.24.m.2.m.0.cv1.bn.bias", "model.24.m.2.m.0.cv1.bn.running_mean", "model.24.m.2.m.0.cv1.bn.running_var", "model.24.m.2.m.0.cv2.conv.weight", "model.24.m.2.m.0.cv2.bn.weight", "model.24.m.2.m.0.cv2.bn.bias", "model.24.m.2.m.0.cv2.bn.running_mean", "model.24.m.2.m.0.cv2.bn.running_var", "model.24.m.4.cv1.conv.weight", "model.24.m.4.cv1.bn.weight", "model.24.m.4.cv1.bn.bias", "model.24.m.4.cv1.bn.running_mean", "model.24.m.4.cv1.bn.running_var", "model.24.m.4.cv2.conv.weight", "model.24.m.4.cv2.bn.weight", "model.24.m.4.cv2.bn.bias", "model.24.m.4.cv2.bn.running_mean", "model.24.m.4.cv2.bn.running_var", "model.24.m.4.cv3.conv.weight", "model.24.m.4.cv3.bn.weight", "model.24.m.4.cv3.bn.bias", "model.24.m.4.cv3.bn.running_mean", "model.24.m.4.cv3.bn.running_var", "model.24.m.4.m.0.cv1.conv.weight", "model.24.m.4.m.0.cv1.bn.weight", "model.24.m.4.m.0.cv1.bn.bias", "model.24.m.4.m.0.cv1.bn.running_mean", "model.24.m.4.m.0.cv1.bn.running_var", "model.24.m.4.m.0.cv2.conv.weight", "model.24.m.4.m.0.cv2.bn.weight", 
"model.24.m.4.m.0.cv2.bn.bias", "model.24.m.4.m.0.cv2.bn.running_mean", "model.24.m.4.m.0.cv2.bn.running_var", "model.24.m.5.weight", "model.24.m.5.bias", "model.24.decoder1.3.cv1.conv.weight", "model.24.decoder1.3.cv1.bn.weight", "model.24.decoder1.3.cv1.bn.bias", "model.24.decoder1.3.cv1.bn.running_mean", "model.24.decoder1.3.cv1.bn.running_var", "model.24.decoder1.3.cv2.conv.weight", "model.24.decoder1.3.cv2.bn.weight", "model.24.decoder1.3.cv2.bn.bias", "model.24.decoder1.3.cv2.bn.running_mean", "model.24.decoder1.3.cv2.bn.running_var", "model.24.decoder1.3.cv3.conv.weight", "model.24.decoder1.3.cv3.bn.weight", "model.24.decoder1.3.cv3.bn.bias", "model.24.decoder1.3.cv3.bn.running_mean", "model.24.decoder1.3.cv3.bn.running_var", "model.24.decoder1.3.m.0.cv1.conv.weight", "model.24.decoder1.3.m.0.cv1.bn.weight", "model.24.decoder1.3.m.0.cv1.bn.bias", "model.24.decoder1.3.m.0.cv1.bn.running_mean", "model.24.decoder1.3.m.0.cv1.bn.running_var", "model.24.decoder1.3.m.0.cv2.conv.weight", "model.24.decoder1.3.m.0.cv2.bn.weight", "model.24.decoder1.3.m.0.cv2.bn.bias", "model.24.decoder1.3.m.0.cv2.bn.running_mean", "model.24.decoder1.3.m.0.cv2.bn.running_var", "model.24.decoder1.5.cv1.conv.weight", "model.24.decoder1.5.cv1.bn.weight", "model.24.decoder1.5.cv1.bn.bias", "model.24.decoder1.5.cv1.bn.running_mean", "model.24.decoder1.5.cv1.bn.running_var", "model.24.decoder1.5.cv2.conv.weight", "model.24.decoder1.5.cv2.bn.weight", "model.24.decoder1.5.cv2.bn.bias", "model.24.decoder1.5.cv2.bn.running_mean", "model.24.decoder1.5.cv2.bn.running_var", "model.24.decoder1.5.cv3.conv.weight", "model.24.decoder1.5.cv3.bn.weight", "model.24.decoder1.5.cv3.bn.bias", "model.24.decoder1.5.cv3.bn.running_mean", "model.24.decoder1.5.cv3.bn.running_var", "model.24.decoder1.5.m.0.cv1.conv.weight", "model.24.decoder1.5.m.0.cv1.bn.weight", "model.24.decoder1.5.m.0.cv1.bn.bias", "model.24.decoder1.5.m.0.cv1.bn.running_mean", "model.24.decoder1.5.m.0.cv1.bn.running_var", 
"model.24.decoder1.5.m.0.cv2.conv.weight", "model.24.decoder1.5.m.0.cv2.bn.weight", "model.24.decoder1.5.m.0.cv2.bn.bias", "model.24.decoder1.5.m.0.cv2.bn.running_mean", "model.24.decoder1.5.m.0.cv2.bn.running_var", "model.24.decoder1.6.weight", "model.24.decoder1.6.bias", "model.24.m32.2.cv1.conv.weight", "model.24.m32.2.cv1.bn.weight", "model.24.m32.2.cv1.bn.bias", "model.24.m32.2.cv1.bn.running_mean", "model.24.m32.2.cv1.bn.running_var", "model.24.m32.2.cv2.conv.weight", "model.24.m32.2.cv2.bn.weight", "model.24.m32.2.cv2.bn.bias", "model.24.m32.2.cv2.bn.running_mean", "model.24.m32.2.cv2.bn.running_var", "model.24.m32.2.cv3.conv.weight", "model.24.m32.2.cv3.bn.weight", "model.24.m32.2.cv3.bn.bias", "model.24.m32.2.cv3.bn.running_mean", "model.24.m32.2.cv3.bn.running_var", "model.24.m32.2.m.0.cv1.conv.weight", "model.24.m32.2.m.0.cv1.bn.weight", "model.24.m32.2.m.0.cv1.bn.bias", "model.24.m32.2.m.0.cv1.bn.running_mean", "model.24.m32.2.m.0.cv1.bn.running_var", "model.24.m32.2.m.0.cv2.conv.weight", "model.24.m32.2.m.0.cv2.bn.weight", "model.24.m32.2.m.0.cv2.bn.bias", "model.24.m32.2.m.0.cv2.bn.running_mean", "model.24.m32.2.m.0.cv2.bn.running_var", "model.24.m16.0.conv.weight", "model.24.m16.0.bn.weight", "model.24.m16.0.bn.bias", "model.24.m16.0.bn.running_mean", "model.24.m16.0.bn.running_var".

Unexpected key(s) in state_dict: "model.24.m.0.conv.weight", "model.24.m.0.bn.weight", "model.24.m.0.bn.bias", "model.24.m.0.bn.running_mean", "model.24.m.0.bn.running_var", "model.24.m.0.bn.num_batches_tracked", "model.24.m.1.cv1.conv.weight", "model.24.m.1.cv1.bn.weight", "model.24.m.1.cv1.bn.bias", "model.24.m.1.cv1.bn.running_mean", "model.24.m.1.cv1.bn.running_var", "model.24.m.1.cv1.bn.num_batches_tracked", "model.24.m.1.cv2.conv.weight", "model.24.m.1.cv2.bn.weight", "model.24.m.1.cv2.bn.bias", "model.24.m.1.cv2.bn.running_mean", "model.24.m.1.cv2.bn.running_var", "model.24.m.1.cv2.bn.num_batches_tracked", "model.24.m.1.cv3.conv.weight", "model.24.m.1.cv3.bn.weight", "model.24.m.1.cv3.bn.bias", "model.24.m.1.cv3.bn.running_mean", "model.24.m.1.cv3.bn.running_var", "model.24.m.1.cv3.bn.num_batches_tracked", "model.24.m.1.m.0.cv1.conv.weight", "model.24.m.1.m.0.cv1.bn.weight", "model.24.m.1.m.0.cv1.bn.bias", "model.24.m.1.m.0.cv1.bn.running_mean", "model.24.m.1.m.0.cv1.bn.running_var", "model.24.m.1.m.0.cv1.bn.num_batches_tracked", "model.24.m.1.m.0.cv2.conv.weight", "model.24.m.1.m.0.cv2.bn.weight", "model.24.m.1.m.0.cv2.bn.bias", "model.24.m.1.m.0.cv2.bn.running_mean", "model.24.m.1.m.0.cv2.bn.running_var", "model.24.m.1.m.0.cv2.bn.num_batches_tracked", "model.24.m.2.m.cv1.conv.weight", "model.24.m.2.m.cv1.bn.weight", "model.24.m.2.m.cv1.bn.bias", "model.24.m.2.m.cv1.bn.running_mean", "model.24.m.2.m.cv1.bn.running_var", "model.24.m.2.m.cv1.bn.num_batches_tracked", "model.24.m.2.m.cv2.conv.weight", "model.24.m.2.m.cv2.bn.weight", "model.24.m.2.m.cv2.bn.bias", "model.24.m.2.m.cv2.bn.running_mean", "model.24.m.2.m.cv2.bn.running_var", "model.24.m.2.m.cv2.bn.num_batches_tracked", "model.24.m.3.weight", "model.24.m.3.bias", "model.24.decoder1.2.cv1.conv.weight", "model.24.decoder1.2.cv1.bn.weight", "model.24.decoder1.2.cv1.bn.bias", "model.24.decoder1.2.cv1.bn.running_mean", "model.24.decoder1.2.cv1.bn.running_var", 
"model.24.decoder1.2.cv1.bn.num_batches_tracked", "model.24.decoder1.2.cv2.conv.weight", "model.24.decoder1.2.cv2.bn.weight", "model.24.decoder1.2.cv2.bn.bias", "model.24.decoder1.2.cv2.bn.running_mean", "model.24.decoder1.2.cv2.bn.running_var", "model.24.decoder1.2.cv2.bn.num_batches_tracked", "model.24.decoder1.2.cv3.conv.weight", "model.24.decoder1.2.cv3.bn.weight", "model.24.decoder1.2.cv3.bn.bias", "model.24.decoder1.2.cv3.bn.running_mean", "model.24.decoder1.2.cv3.bn.running_var", "model.24.decoder1.2.cv3.bn.num_batches_tracked", "model.24.decoder1.2.m.cv1.conv.weight", "model.24.decoder1.2.m.cv1.bn.weight", "model.24.decoder1.2.m.cv1.bn.bias", "model.24.decoder1.2.m.cv1.bn.running_mean", "model.24.decoder1.2.m.cv1.bn.running_var", "model.24.decoder1.2.m.cv1.bn.num_batches_tracked", "model.24.decoder1.2.m.cv2.conv.weight", "model.24.decoder1.2.m.cv2.bn.weight", "model.24.decoder1.2.m.cv2.bn.bias", "model.24.decoder1.2.m.cv2.bn.running_mean", "model.24.decoder1.2.m.cv2.bn.running_var", "model.24.decoder1.2.m.cv2.bn.num_batches_tracked", "model.24.decoder1.3.weight", "model.24.decoder1.3.bias", "model.24.m8.1.cv1.conv.weight", "model.24.m8.1.cv1.bn.weight", "model.24.m8.1.cv1.bn.bias", "model.24.m8.1.cv1.bn.running_mean", "model.24.m8.1.cv1.bn.running_var", "model.24.m8.1.cv1.bn.num_batches_tracked", "model.24.m8.1.cv2.conv.weight", "model.24.m8.1.cv2.bn.weight", "model.24.m8.1.cv2.bn.bias", "model.24.m8.1.cv2.bn.running_mean", "model.24.m8.1.cv2.bn.running_var", "model.24.m8.1.cv2.bn.num_batches_tracked", "model.24.m8.1.cv3.conv.weight", "model.24.m8.1.cv3.bn.weight", "model.24.m8.1.cv3.bn.bias", "model.24.m8.1.cv3.bn.running_mean", "model.24.m8.1.cv3.bn.running_var", "model.24.m8.1.cv3.bn.num_batches_tracked", "model.24.m8.1.m.0.cv1.conv.weight", "model.24.m8.1.m.0.cv1.bn.weight", "model.24.m8.1.m.0.cv1.bn.bias", "model.24.m8.1.m.0.cv1.bn.running_mean", "model.24.m8.1.m.0.cv1.bn.running_var", "model.24.m8.1.m.0.cv1.bn.num_batches_tracked", 
"model.24.m8.1.m.0.cv2.conv.weight", "model.24.m8.1.m.0.cv2.bn.weight", "model.24.m8.1.m.0.cv2.bn.bias", "model.24.m8.1.m.0.cv2.bn.running_mean", "model.24.m8.1.m.0.cv2.bn.running_var", "model.24.m8.1.m.0.cv2.bn.num_batches_tracked", "model.24.m32.1.cv1.conv.weight", "model.24.m32.1.cv1.bn.weight", "model.24.m32.1.cv1.bn.bias", "model.24.m32.1.cv1.bn.running_mean", "model.24.m32.1.cv1.bn.running_var", "model.24.m32.1.cv1.bn.num_batches_tracked", "model.24.m32.1.cv2.conv.weight", "model.24.m32.1.cv2.bn.weight", "model.24.m32.1.cv2.bn.bias", "model.24.m32.1.cv2.bn.running_mean", "model.24.m32.1.cv2.bn.running_var", "model.24.m32.1.cv2.bn.num_batches_tracked", "model.24.m32.1.cv3.conv.weight", "model.24.m32.1.cv3.bn.weight", "model.24.m32.1.cv3.bn.bias", "model.24.m32.1.cv3.bn.running_mean", "model.24.m32.1.cv3.bn.running_var", "model.24.m32.1.cv3.bn.num_batches_tracked", "model.24.m32.1.m.0.cv1.conv.weight", "model.24.m32.1.m.0.cv1.bn.weight", "model.24.m32.1.m.0.cv1.bn.bias", "model.24.m32.1.m.0.cv1.bn.running_mean", "model.24.m32.1.m.0.cv1.bn.running_var", "model.24.m32.1.m.0.cv1.bn.num_batches_tracked", "model.24.m32.1.m.0.cv2.conv.weight", "model.24.m32.1.m.0.cv2.bn.weight", "model.24.m32.1.m.0.cv2.bn.bias", "model.24.m32.1.m.0.cv2.bn.running_mean", "model.24.m32.1.m.0.cv2.bn.running_var", "model.24.m32.1.m.0.cv2.bn.num_batches_tracked", "model.24.m16.0.cv1.conv.weight", "model.24.m16.0.cv1.bn.weight", "model.24.m16.0.cv1.bn.bias", "model.24.m16.0.cv1.bn.running_mean", "model.24.m16.0.cv1.bn.running_var", "model.24.m16.0.cv1.bn.num_batches_tracked", "model.24.m16.0.cv2.conv.weight", "model.24.m16.0.cv2.bn.weight", "model.24.m16.0.cv2.bn.bias", "model.24.m16.0.cv2.bn.running_mean", "model.24.m16.0.cv2.bn.running_var", "model.24.m16.0.cv2.bn.num_batches_tracked", "model.24.m16.0.cv3.conv.weight", "model.24.m16.0.cv3.bn.weight", "model.24.m16.0.cv3.bn.bias", "model.24.m16.0.cv3.bn.running_mean", "model.24.m16.0.cv3.bn.running_var", 
"model.24.m16.0.cv3.bn.num_batches_tracked", "model.24.m16.0.m.0.cv1.conv.weight", "model.24.m16.0.m.0.cv1.bn.weight", "model.24.m16.0.m.0.cv1.bn.bias", "model.24.m16.0.m.0.cv1.bn.running_mean", "model.24.m16.0.m.0.cv1.bn.running_var", "model.24.m16.0.m.0.cv1.bn.num_batches_tracked", "model.24.m16.0.m.0.cv2.conv.weight", "model.24.m16.0.m.0.cv2.bn.weight", "model.24.m16.0.m.0.cv2.bn.bias", "model.24.m16.0.m.0.cv2.bn.running_mean", "model.24.m16.0.m.0.cv2.bn.running_var", "model.24.m16.0.m.0.cv2.bn.num_batches_tracked".

size mismatch for model.24.m.2.cv1.conv.weight: copying a param with shape torch.Size([128, 256, 1, 1]) from checkpoint, the shape in current model is torch.Size([32, 128, 1, 1]).

size mismatch for model.24.m.2.cv1.bn.weight: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv1.bn.bias: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv1.bn.running_mean: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv1.bn.running_var: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv2.conv.weight: copying a param with shape torch.Size([128, 256, 1, 1]) from checkpoint, the shape in current model is torch.Size([32, 128, 1, 1]).

size mismatch for model.24.m.2.cv2.bn.weight: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv2.bn.bias: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv2.bn.running_mean: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv2.bn.running_var: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([32]).

size mismatch for model.24.m.2.cv3.conv.weight: copying a param with shape torch.Size([256, 320, 1, 1]) from checkpoint, the shape in current model is torch.Size([64, 64, 1, 1]).

size mismatch for model.24.m.2.cv3.bn.weight: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.m.2.cv3.bn.bias: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.m.2.cv3.bn.running_mean: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.m.2.cv3.bn.running_var: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv1.conv.weight: copying a param with shape torch.Size([128, 256, 1, 1]) from checkpoint, the shape in current model is torch.Size([64, 256, 1, 1]).

size mismatch for model.24.decoder1.1.cv1.bn.weight: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv1.bn.bias: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv1.bn.running_mean: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv1.bn.running_var: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv2.conv.weight: copying a param with shape torch.Size([128, 256, 1, 1]) from checkpoint, the shape in current model is torch.Size([64, 256, 1, 1]).

size mismatch for model.24.decoder1.1.cv2.bn.weight: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv2.bn.bias: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv2.bn.running_mean: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv2.bn.running_var: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.cv3.conv.weight: copying a param with shape torch.Size([256, 256, 1, 1]) from checkpoint, the shape in current model is torch.Size([128, 128, 1, 1]).

size mismatch for model.24.decoder1.1.cv3.bn.weight: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([128]).

size mismatch for model.24.decoder1.1.cv3.bn.bias: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([128]).

size mismatch for model.24.decoder1.1.cv3.bn.running_mean: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([128]).

size mismatch for model.24.decoder1.1.cv3.bn.running_var: copying a param with shape torch.Size([256]) from checkpoint, the shape in current model is torch.Size([128]).

size mismatch for model.24.decoder1.1.m.0.cv1.conv.weight: copying a param with shape torch.Size([128, 128, 1, 1]) from checkpoint, the shape in current model is torch.Size([64, 64, 1, 1]).

size mismatch for model.24.decoder1.1.m.0.cv1.bn.weight: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv1.bn.bias: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv1.bn.running_mean: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv1.bn.running_var: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv2.conv.weight: copying a param with shape torch.Size([128, 128, 3, 3]) from checkpoint, the shape in current model is torch.Size([64, 64, 3, 3]).

size mismatch for model.24.decoder1.1.m.0.cv2.bn.weight: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv2.bn.bias: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv2.bn.running_mean: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

size mismatch for model.24.decoder1.1.m.0.cv2.bn.running_var: copying a param with shape torch.Size([128]) from checkpoint, the shape in current model is torch.Size([64]).

相关推荐
牛奶13 小时前
2026年大模型怎么选?前端人实用对比
前端·人工智能·ai编程
牛奶13 小时前
前端人为什么要学AI?
前端·人工智能·ai编程
哥布林学者15 小时前
高光谱成像(一)高光谱图像
机器学习·高光谱成像
罗西的思考16 小时前
AI Agent框架探秘:拆解 OpenHands(10)--- Runtime
人工智能·算法·机器学习
冬奇Lab16 小时前
OpenClaw 源码精读(2):Channel & Routing——一条消息如何找到它的 Agent?
人工智能·开源·源码阅读
冬奇Lab16 小时前
一天一个开源项目(第38篇):Claude Code Telegram - 用 Telegram 远程用 Claude Code,随时随地聊项目
人工智能·开源·资讯
格砸18 小时前
从入门到辞职|从ChatGPT到OpenClaw,跟上智能时代的进化
前端·人工智能·后端
可观测性用观测云18 小时前
可观测性 4.0:教系统如何思考
人工智能
sunny86518 小时前
Claude Code 跨会话上下文恢复:从 8 次纠正到 0 次的工程实践
人工智能·开源·github
小笼包包仔18 小时前
OpenClaw 多Agent软件开发最佳实践指南
人工智能