ValueError: Annotation must have data_list and metainfo keys #3046

Closed · 690696306 opened this issue May 16, 2024 · 0 comments
Prerequisite

Environment

py10_cuda118_torch200

Reproduces the problem - code sample

Train config file

_base_ = ['../../../_base_/default_runtime.py']

# runtime

max_epochs = 270
stage2_num_epochs = 30
base_lr = 4e-3

train_cfg = dict(max_epochs=max_epochs, val_interval=10)
randomness = dict(seed=21)

# optimizer

optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05),
    paramwise_cfg=dict(
        norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))

# learning rate

param_scheduler = [
    dict(
        type='LinearLR',
        start_factor=1.0e-5,
        by_epoch=False,
        begin=0,
        end=1000),
    dict(
        type='CosineAnnealingLR',
        eta_min=base_lr * 0.05,
        begin=max_epochs // 2,
        end=max_epochs,
        T_max=max_epochs // 2,
        by_epoch=True,
        convert_to_iter_based=True),
]

# automatically scaling LR based on the actual training batch size

auto_scale_lr = dict(base_batch_size=512)

# codec settings

codec = dict(
    type='SimCCLabel',
    input_size=(288, 384),
    sigma=(6., 6.93),
    simcc_split_ratio=2.0,
    normalize=False,
    use_dark=False)

# model settings

model = dict(
    type='TopdownPoseEstimator',
    data_preprocessor=dict(
        type='PoseDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True),
    backbone=dict(
        _scope_='mmdet',
        type='CSPNeXt',
        arch='P5',
        expand_ratio=0.5,
        deepen_factor=1.,
        widen_factor=1.,
        out_indices=(4, ),
        channel_attention=True,
        norm_cfg=dict(type='SyncBN'),
        act_cfg=dict(type='SiLU'),
        init_cfg=dict(
            type='Pretrained',
            prefix='backbone.',
            checkpoint='https://download.openmmlab.com/mmpose/v1/projects/'
            'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth'  # noqa: E501
        )),
    head=dict(
        type='RTMCCHead',
        in_channels=1024,
        out_channels=133,
        input_size=codec['input_size'],
        in_featuremap_size=tuple([s // 32 for s in codec['input_size']]),
        simcc_split_ratio=codec['simcc_split_ratio'],
        final_layer_kernel_size=7,
        gau_cfg=dict(
            hidden_dims=256,
            s=128,
            expansion_factor=2,
            dropout_rate=0.,
            drop_path=0.,
            act_fn='SiLU',
            use_rel_bias=False,
            pos_enc=False),
        loss=dict(
            type='KLDiscretLoss',
            use_target_weight=True,
            beta=10.,
            label_softmax=True),
        decoder=codec),
    test_cfg=dict(flip_test=True, ))

# base dataset settings
dataset_type = 'MyCustomDataset'
# data_mode = 'topdown'
data_root = 'data/coco/'

backend_args = dict(backend='local')

# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         f'{data_root}': 's3://openmmlab/datasets/detection/coco/',
#         f'{data_root}': 's3://openmmlab/datasets/detection/coco/'
#     }))

# pipelines

train_pipeline = [
    dict(type='LoadImage', backend_args=backend_args),
    dict(type='GetBBoxCenterScale'),
    dict(type='RandomFlip', direction='horizontal'),
    dict(type='RandomHalfBody'),
    dict(
        type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='mmdet.YOLOXHSVRandomAug'),
    dict(
        type='Albumentation',
        transforms=[
            dict(type='Blur', p=0.1),
            dict(type='MedianBlur', p=0.1),
            dict(
                type='CoarseDropout',
                max_holes=1,
                max_height=0.4,
                max_width=0.4,
                min_holes=1,
                min_height=0.2,
                min_width=0.2,
                p=1.0),
        ]),
    dict(type='GenerateTarget', encoder=codec),
    dict(type='PackPoseInputs')
]
val_pipeline = [
    dict(type='LoadImage', backend_args=backend_args),
    dict(type='GetBBoxCenterScale'),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='PackPoseInputs')
]

train_pipeline_stage2 = [
    dict(type='LoadImage', backend_args=backend_args),
    dict(type='GetBBoxCenterScale'),
    dict(type='RandomFlip', direction='horizontal'),
    dict(type='RandomHalfBody'),
    dict(
        type='RandomBBoxTransform',
        shift_factor=0.,
        scale_factor=[0.75, 1.25],
        rotate_factor=60),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='mmdet.YOLOXHSVRandomAug'),
    dict(
        type='Albumentation',
        transforms=[
            dict(type='Blur', p=0.1),
            dict(type='MedianBlur', p=0.1),
            dict(
                type='CoarseDropout',
                max_holes=1,
                max_height=0.4,
                max_width=0.4,
                min_holes=1,
                min_height=0.2,
                min_width=0.2,
                p=0.5),
        ]),
    dict(type='GenerateTarget', encoder=codec),
    dict(type='PackPoseInputs')
]

# data loaders

train_dataloader = dict(
    batch_size=32,
    num_workers=10,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        # data_mode=data_mode,
        ann_file='annotations/coco_wholebody_train_v1.0.json',
        data_prefix=dict(img='train2017/'),
        pipeline=train_pipeline,
        metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'),
    ))
val_dataloader = dict(
    batch_size=32,
    num_workers=10,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        # data_mode=data_mode,
        ann_file='annotations/coco_wholebody_val_v1.0.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        # bbox_file='data/coco/person_detection_results/'
        # 'COCO_val2017_detections_AP_H_56_person.json',
        pipeline=val_pipeline,
        metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'),
    ))
test_dataloader = val_dataloader

# hooks

# default_hooks = dict(
#     checkpoint=dict(
#         save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1))

custom_hooks = [
    dict(
        type='EMAHook',
        ema_type='ExpMomentumEMA',
        momentum=0.0002,
        update_buffers=True,
        priority=49),
    dict(
        type='mmdet.PipelineSwitchHook',
        switch_epoch=max_epochs - stage2_num_epochs,
        switch_pipeline=train_pipeline_stage2)
]

# evaluators

# val_evaluator = dict(
#     type='CocoWholeBodyMetric',
#     ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json')

val_evaluator = [
    dict(type='PCKAccuracy', thr=0.2),
    dict(type='AUC'),
    dict(type='EPE'),
]

test_evaluator = val_evaluator

Custom datasets

import copy
import os.path as osp
from typing import Optional

import numpy as np

from mmengine.dataset import BaseDataset
from mmpose.registry import DATASETS


@DATASETS.register_module(name='MyCustomDataset')
class MyCustomDataset(BaseDataset):

    METAINFO: dict = dict(
        from_file='configs/_base_/datasets/coco_wholebody.py')
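
For context: the error below is raised by mmengine's BaseDataset.load_data_list, which only accepts annotation files in the mmengine standard format, i.e. a JSON file whose top level is a dict with metainfo and data_list keys. MyCustomDataset above inherits load_data_list unchanged, so pointing it at a plain COCO-style JSON (top-level images/annotations/categories) fails that check. A minimal sketch of the top-level structure BaseDataset can load; the per-sample fields are illustrative assumptions, not the exact schema mmpose consumes:

# Sketch of an mmengine-format annotation dict; dumped to JSON, it
# passes the top-level check in BaseDataset.load_data_list.
ann = {
    'metainfo': {'dataset_name': 'my_custom_dataset'},  # hypothetical metainfo
    'data_list': [
        {
            'img_path': 'train2017/000000000001.jpg',  # hypothetical sample
            'bbox': [10.0, 20.0, 100.0, 200.0],
        },
    ],
}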

Reproduces the problem - command or script

python tools/train.py /home/lab/Anaconda_project/mmpose/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py --work-dir /home/lab/Anaconda_project/mmpose/weights

Reproduces the problem - error message

/home/lab/Anaconda_project/mmpose/mmpose/datasets/transforms/common_transforms.py:656: UserWarning: Blur is not pixel-level transformations. Please use with caution.
  warnings.warn(
/home/lab/Anaconda_project/mmpose/mmpose/datasets/transforms/common_transforms.py:656: UserWarning: MedianBlur is not pixel-level transformations. Please use with caution.
  warnings.warn(
/home/lab/Anaconda_project/mmpose/mmpose/datasets/transforms/common_transforms.py:656: UserWarning: CoarseDropout is not pixel-level transformations. Please use with caution.
  warnings.warn(
Traceback (most recent call last):
  File "/home/lab/Anaconda_project/mmpose/tools/train.py", line 162, in <module>
    main()
  File "/home/lab/Anaconda_project/mmpose/tools/train.py", line 158, in main
    runner.train()
  File "/home/lab/.local/lib/python3.10/site-packages/mmengine/runner/runner.py", line 1728, in train
    self._train_loop = self.build_train_loop(
  File "/home/lab/.local/lib/python3.10/site-packages/mmengine/runner/runner.py", line 1527, in build_train_loop
    loop = EpochBasedTrainLoop(
  File "/home/lab/.local/lib/python3.10/site-packages/mmengine/runner/loops.py", line 44, in __init__
    super().__init__(runner, dataloader)
  File "/home/lab/.local/lib/python3.10/site-packages/mmengine/runner/base_loop.py", line 26, in __init__
    self.dataloader = runner.build_dataloader(
  File "/home/lab/.local/lib/python3.10/site-packages/mmengine/runner/runner.py", line 1370, in build_dataloader
    dataset = DATASETS.build(dataset_cfg)
  File "/home/lab/.local/lib/python3.10/site-packages/mmengine/registry/registry.py", line 570, in build
    return self.build_func(cfg, *args, **kwargs, registry=self)
  File "/home/lab/.local/lib/python3.10/site-packages/mmengine/registry/build_functions.py", line 121, in build_from_cfg
    obj = obj_cls(**args)  # type: ignore
  File "/home/lab/.local/lib/python3.10/site-packages/mmengine/dataset/base_dataset.py", line 247, in __init__
    self.full_init()
  File "/home/lab/.local/lib/python3.10/site-packages/mmengine/dataset/base_dataset.py", line 298, in full_init
    self.data_list = self.load_data_list()
  File "/home/lab/.local/lib/python3.10/site-packages/mmengine/dataset/base_dataset.py", line 440, in load_data_list
    raise ValueError('Annotation must have data_list and metainfo '
ValueError: Annotation must have data_list and metainfo keys

Additional information

Hi, I want to train on my own customized data, and I followed the docs to build the files. But when I run train.py, I keep getting the [ValueError: Annotation must have data_list and metainfo keys] error.

I thought it might be a problem with my config file, so I changed all the file paths back to the original coco_wholebody config, keeping only dataset_type = 'MyCustomDataset':

out_channels=133,
ann_file='annotations/coco_wholebody_train_v1.0.json',
data_prefix=dict(img='train2017/'),
ann_file='annotations/coco_wholebody_val_v1.0.json',
data_prefix=dict(img='val2017/'),
metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py')

but I still get this error. The only way I have found to make it go away is to change dataset_type = 'MyCustomDataset' to dataset_type = 'CocoWholeBodyDataset', but then I cannot train on my custom dataset.

Has anyone run into this problem before?
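
One plausible fix, sketched below under the assumption that the custom annotations follow the COCO-WholeBody JSON layout: inherit from mmpose's BaseCocoStyleDataset (the same base class CocoWholeBodyDataset uses) instead of mmengine's generic BaseDataset, so the COCO-style parsing is reused. The alternative is converting the annotation file to the mmengine format sketched above. This is a sketch, not a verified mmpose recipe:

from mmpose.datasets.datasets.base import BaseCocoStyleDataset
from mmpose.registry import DATASETS


# A minimal sketch, assuming COCO-WholeBody-style annotations:
# BaseCocoStyleDataset overrides load_data_list to parse COCO-style
# JSON, so the mmengine check that raises this ValueError is never hit.
@DATASETS.register_module(name='MyCustomDataset')
class MyCustomDataset(BaseCocoStyleDataset):

    METAINFO: dict = dict(
        from_file='configs/_base_/datasets/coco_wholebody.py')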
