I tried this on the Windows 10 platform, and an error occurs when I try to train on my dataset. Here is the log.
How can I solve this problem?
'tail' is not recognized as an internal or external command, operable program or batch file.
'gcc' is not recognized as an internal or external command, operable program or batch file.
fatal: not a git repository (or any of the parent directories): .git
sys.platform: win32
Python: 3.6.12 |Anaconda, Inc.| (default, Sep 9 2020, 00:29:25) [MSC v.1916 64 bit (AMD64)]
CUDA available: True
GPU 0: GeForce RTX 2060 with Max-Q Design
CUDA_HOME: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1
NVCC: Not Available
GCC: n/a
PyTorch: 1.6.0+cu101
PyTorch compiling details: PyTorch built with:
TorchVision: 0.7.0+cu101
OpenCV: 4.4.0
MMCV: 1.1.5
MMCV Compiler: MSVC 192729111
MMCV CUDA Compiler: 10.1
2020-11-29 16:17:54,254 - mmdet - INFO - Distributed training: False
2020-11-29 16:17:55,219 - mmdet - INFO - Config:
model = dict(
type='CascadeRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=0.1111111111111111, loss_weight=1.0)),
roi_head=dict(
type='CascadeRoIHead',
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=94,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=94,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=94,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
]))
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)
])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1000, 600),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type='VOCDataset',
ann_file=['data/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt'],
img_prefix=['data/VOCdevkit/VOC2007/'],
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
])),
val=dict(
type='VOCDataset',
ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt',
img_prefix='data/VOCdevkit/VOC2007/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1000, 600),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]),
test=dict(
type='VOCDataset',
ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt',
img_prefix='data/VOCdevkit/VOC2007/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1000, 600),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]))
evaluation = dict(interval=1, metric='mAP')
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[8, 11])
total_epochs = 12
checkpoint_config = dict(interval=1)
log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
work_dir = 'hudie_workdir'
gpu_ids = range(0, 1)
2020-11-29 16:17:56,399 - mmdet - INFO - load model from: torchvision://resnet50
2020-11-29 16:17:56,992 - mmdet - WARNING - The model and loaded state dict do not match exactly
unexpected key in source state_dict: fc.weight, fc.bias
fatal: not a git repository (or any of the parent directories): .git
2020-11-29 16:18:01,446 - mmdet - INFO - Start running, host: ASUS@LAPTOP-AC34UT2N, work_dir: D:\PycharmProjects\mmd2\hudie_workdir
2020-11-29 16:18:01,447 - mmdet - INFO - workflow: [('train', 1)], max: 12 epochs
Traceback (most recent call last):
File "tools/train.py", line 181, in
main()
File "tools/train.py", line 177, in main
meta=meta)
File "c:\users\asus\mmdetection\mmdet\apis\train.py", line 150, in train_detector
runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
File "D:\anaconda\envs\mmd2\lib\site-packages\mmcv\runner\epoch_based_runner.py", line 125, in run
epoch_runner(data_loaders[i], **kwargs)
File "D:\anaconda\envs\mmd2\lib\site-packages\mmcv\runner\epoch_based_runner.py", line 47, in train
for i, data_batch in enumerate(self.data_loader):
File "D:\anaconda\envs\mmd2\lib\site-packages\torch\utils\data\dataloader.py", line 291, in __iter__
return _MultiProcessingDataLoaderIter(self)
File "D:\anaconda\envs\mmd2\lib\site-packages\torch\utils\data\dataloader.py", line 764, in __init__
self._try_put_index()
File "D:\anaconda\envs\mmd2\lib\site-packages\torch\utils\data\dataloader.py", line 994, in _try_put_index
index = self._next_index()
File "D:\anaconda\envs\mmd2\lib\site-packages\torch\utils\data\dataloader.py", line 357, in _next_index
return next(self._sampler_iter) # may raise StopIteration
File "D:\anaconda\envs\mmd2\lib\site-packages\torch\utils\data\sampler.py", line 208, in __iter__
for idx in self.sampler:
File "c:\users\asus\mmdetection\mmdet\datasets\samplersgroup_sampler.py", line 36, in __iter__
indices = np.concatenate(indices)
File "<__array_function__ internals>", line 6, in concatenate
ValueError: need at least one array to concatenate
I have tried many times over the past few days, but it didn't work at all. Can someone please help me?
It looks like I am having the same issue. Here is my bug report:
Describe the bug
I get the following error when I run train.py:
Traceback (most recent call last):
File "tools/train.py", line 181, in <module>
main()
File "tools/train.py", line 177, in main
meta=meta)
File "/mmdetection/mmdet/apis/train.py", line 150, in train_detector
runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
File "/opt/conda/lib/python3.7/site-packages/mmcv/runner/epoch_based_runner.py", line 125, in run
epoch_runner(data_loaders[i], **kwargs)
File "/opt/conda/lib/python3.7/site-packages/mmcv/runner/epoch_based_runner.py", line 47, in train
for i, data_batch in enumerate(self.data_loader):
File "/opt/conda/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 291, in __iter__
return _MultiProcessingDataLoaderIter(self)
File "/opt/conda/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 764, in __init__
self._try_put_index()
File "/opt/conda/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 994, in _try_put_index
index = self._next_index()
File "/opt/conda/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 357, in _next_index
return next(self._sampler_iter) # may raise StopIteration
File "/opt/conda/lib/python3.7/site-packages/torch/utils/data/sampler.py", line 208, in __iter__
for idx in self.sampler:
File "/mmdetection/mmdet/datasets/samplers/group_sampler.py", line 36, in __iter__
indices = np.concatenate(indices)
File "<__array_function__ internals>", line 6, in concatenate
ValueError: need at least one array to concatenate
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/opt/conda/lib/python3.7/multiprocessing/popen_fork.py", line 28, in poll
pid, sts = os.waitpid(self.pid, flag)
File "/opt/conda/lib/python3.7/site-packages/torch/utils/data/_utils/signal_handling.py", line 66, in handler
_error_if_any_worker_fails()
RuntimeError: DataLoader worker (pid 49) is killed by signal: Terminated.
Reproduction
python tools/train.py configs/custom_dataset_ssd.py
# configs/custom_dataset_ssd.py
_base_ = 'ssd/ssd300_coco.py'
model = dict(
bbox_head=dict(num_classes=1))
dataset_type = 'COCODataset'
classes = ('pupil',)
data = dict(
train=dict(
img_prefix='/coco/train/',
classes=classes,
ann_file='/coco/train/annotation_coco.json',
dataset=dict(
ann_file='/coco/train/annotation_coco.json',
img_prefix='/coco/train/')),
val=dict(
img_prefix='/coco/val/',
classes=classes,
ann_file='/coco/val/annotation_coco.json'),
test=dict(
img_prefix='/coco/val/',
classes=classes,
ann_file='/coco/val/annotation_coco.json'))
A custom COCO-format dataset with only one class.
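Before digging further, a quick sanity check is to load the annotation file with pycocotools (the COCO API that the COCO dataset loader uses under the hood) and confirm the category names and counts look right. This is a sketch using the paths and class from the config above:

```python
from pycocotools.coco import COCO

# Paths taken from the config above; adjust to your own setup.
coco = COCO('/coco/train/annotation_coco.json')
cats = coco.loadCats(coco.getCatIds())

# The names here must match the `classes` tuple in the config, e.g. ('pupil',).
print([c['name'] for c in cats])
print('images:', len(coco.getImgIds()))       # should be non-zero
print('annotations:', len(coco.getAnnIds()))  # should be non-zero
```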
Environment
Output of python mmdet/utils/collect_env.py:
sys.platform: linux
Python: 3.7.7 (default, May 7 2020, 21:25:33) [GCC 7.3.0]
CUDA available: True
GPU 0: Tesla K80
CUDA_HOME: /usr/local/cuda
NVCC: Cuda compilation tools, release 10.1, V10.1.243
GCC: gcc (Ubuntu 7.4.0-1ubuntu1~18.04.1) 7.4.0
PyTorch: 1.6.0
PyTorch compiling details: PyTorch built with:
- GCC 7.3
- C++ Version: 201402
- Intel(R) Math Kernel Library Version 2020.0.1 Product Build 20200208 for Intel(R) 64 architecture applications
- Intel(R) MKL-DNN v1.5.0 (Git Hash e2ac1fac44c5078ca927cb9b90e1b3066a0b2ed0)
- OpenMP 201511 (a.k.a. OpenMP 4.5)
- NNPACK is enabled
- CPU capability usage: AVX2
- CUDA Runtime 10.1
- NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_37,code=compute_37
- CuDNN 7.6.3
- Magma 2.5.2
- Build settings: BLAS=MKL, BUILD_TYPE=Release, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DUSE_VULKAN_WRAPPER -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, USE_CUDA=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_STATIC_DISPATCH=OFF,
TorchVision: 0.7.0
OpenCV: 4.4.0
MMCV: 1.2.1
MMCV Compiler: GCC 7.3
MMCV CUDA Compiler: 10.1
MMDetection: 2.7.0+3e902c3
I am using nvidia-docker with the included Dockerfile. My dataset is mounted at /coco/.
Error traceback
2020-12-03 02:36:20,424 - mmdet - INFO - Distributed training: False
2020-12-03 02:36:20,828 - mmdet - INFO - Config:
input_size = 300
model = dict(
type='SingleStageDetector',
pretrained='open-mmlab://vgg16_caffe',
backbone=dict(
type='SSDVGG',
input_size=300,
depth=16,
with_last_pool=False,
ceil_mode=True,
out_indices=(3, 4),
out_feature_indices=(22, 34),
l2_norm_scale=20),
neck=None,
bbox_head=dict(
type='SSDHead',
in_channels=(512, 1024, 512, 256, 256, 256),
num_classes=1,
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=300,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.1, 0.1, 0.2, 0.2])))
cudnn_benchmark = True
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.0,
ignore_iof_thr=-1,
gt_max_assign_all=False),
smoothl1_beta=1.0,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False)
test_cfg = dict(
nms=dict(type='nms', iou_threshold=0.45),
min_bbox_size=0,
score_thr=0.02,
max_per_img=200)
dataset_type = 'COCODataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=[123.675, 116.28, 103.53],
to_rgb=True,
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[1, 1, 1],
to_rgb=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[1, 1, 1],
to_rgb=True),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
type='RepeatDataset',
times=5,
dataset=dict(
type='CocoDataset',
ann_file='/coco/train/annotation_coco.json',
img_prefix='/coco/train/',
pipeline=[
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=[123.675, 116.28, 103.53],
to_rgb=True,
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[1, 1, 1],
to_rgb=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]),
img_prefix='/coco/train/',
classes=('pupil', ),
ann_file='/coco/train/annotation_coco.json'),
val=dict(
type='CocoDataset',
ann_file='/coco/val/annotation_coco.json',
img_prefix='/coco/val/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[1, 1, 1],
to_rgb=True),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
],
classes=('pupil', )),
test=dict(
type='CocoDataset',
ann_file='/coco/val/annotation_coco.json',
img_prefix='/coco/val/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[1, 1, 1],
to_rgb=True),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
],
classes=('pupil', )))
evaluation = dict(interval=1, metric='bbox')
optimizer = dict(type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[16, 22])
total_epochs = 24
checkpoint_config = dict(interval=1)
log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
classes = ('pupil', )
work_dir = './work_dirs/custom_dataset_ssd'
gpu_ids = range(0, 1)
2020-12-03 02:36:21,016 - mmdet - INFO - load model from: open-mmlab://vgg16_caffe
2020-12-03 02:36:21,062 - mmdet - WARNING - The model and loaded state dict do not match exactly
missing keys in source state_dict: extra.0.weight, extra.0.bias, extra.1.weight, extra.1.bias, extra.2.weight, extra.2.bias, extra.3.weight, extra.3.bias, extra.4.weight, extra.4.bias, extra.5.weight, extra.5.bias, extra.6.weight, extra.6.bias, extra.7.weight, extra.7.bias, l2_norm.weight
2020-12-03 02:36:22,395 - mmdet - INFO - Start running, host: root@2acc5a4c80fa, work_dir: /mmdetection/work_dirs/custom_dataset_ssd
2020-12-03 02:36:22,395 - mmdet - INFO - workflow: [('train', 1)], max: 24 epochs
loading annotations into memory...
Done (t=0.00s)
creating index...
index created!
loading annotations into memory...
Done (t=0.00s)
creating index...
index created!
Traceback (most recent call last):
File "tools/train.py", line 181, in <module>
main()
File "tools/train.py", line 177, in main
meta=meta)
File "/mmdetection/mmdet/apis/train.py", line 150, in train_detector
runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
File "/opt/conda/lib/python3.7/site-packages/mmcv/runner/epoch_based_runner.py", line 125, in run
epoch_runner(data_loaders[i], **kwargs)
File "/opt/conda/lib/python3.7/site-packages/mmcv/runner/epoch_based_runner.py", line 47, in train
for i, data_batch in enumerate(self.data_loader):
File "/opt/conda/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 291, in __iter__
return _MultiProcessingDataLoaderIter(self)
File "/opt/conda/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 764, in __init__
self._try_put_index()
File "/opt/conda/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 994, in _try_put_index
index = self._next_index()
File "/opt/conda/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 357, in _next_index
return next(self._sampler_iter) # may raise StopIteration
File "/opt/conda/lib/python3.7/site-packages/torch/utils/data/sampler.py", line 208, in __iter__
for idx in self.sampler:
File "/mmdetection/mmdet/datasets/samplers/group_sampler.py", line 36, in __iter__
indices = np.concatenate(indices)
File "<__array_function__ internals>", line 6, in concatenate
ValueError: need at least one array to concatenate
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/opt/conda/lib/python3.7/multiprocessing/popen_fork.py", line 28, in poll
pid, sts = os.waitpid(self.pid, flag)
File "/opt/conda/lib/python3.7/site-packages/torch/utils/data/_utils/signal_handling.py", line 66, in handler
_error_if_any_worker_fails()
RuntimeError: DataLoader worker (pid 117) is killed by signal: Terminated.
Bug fix
The indices array seems to be empty when it is concatenated. The only way that could happen is if every group size was zero, which seems to mean dataset.flag must be empty. I'm not sure under what circumstances that would happen.
In mmdetection/mmdet/datasets/samplers/group_sampler.py, it looks like dataset is a mmdet.datasets.dataset_wrappers.RepeatDataset and dataset.flag is indeed [].
dataset.CLASSES is also a list of what looks like the COCO classes. That seems like it could be a problem -- maybe I needed to declare my classes somewhere else?
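To make the failure mode concrete, here is a simplified sketch of what the group sampler does (not the exact mmdet source, just the shape of the logic): it buckets image indices by dataset.flag and concatenates the buckets, so an empty flag array leaves nothing to concatenate.

```python
import numpy as np

# Simplified sketch of GroupSampler.__iter__ (illustrative, not the real source).
# `flag` holds one group id per image (mmdet uses 0/1 by aspect ratio). When no
# images/annotations survive loading -- e.g. the configured classes match
# nothing in the annotation file -- flag is empty.
flag = np.array([], dtype=np.int64)  # what dataset.flag looks like in this bug

indices = []
for group_id in np.unique(flag):  # no groups, so the loop body never runs
    group_indices = np.where(flag == group_id)[0]
    np.random.shuffle(group_indices)
    indices.append(group_indices)

indices = np.concatenate(indices)  # ValueError: need at least one array to concatenate
```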
After lots of failures I gave up. Later I tried a similar framework, Detectron2, and found it easier to configure. More importantly, it does not have such bugs.
Same problem when training YOLO with my own dataset converted to COCO format.
> Same problem when training YOLO with my own dataset converted to COCO format.

OK, the problem was solved by explicitly specifying the classes in the config file.
I met the same issue; it must be a problem with your CLASSES in mmdet/datasets/coco.py and num_classes in the config file.
It looks like @Luoxsh6 was right. The problem was that I put my classes list in the train config dict (where it is in the example) instead of the train.dataset dict. Apparently that is required for SSD at least.
Bad:
data = dict(
...
train=dict(
classes=(...),
...
Good:
data = dict(
...
train=dict(
...
dataset=dict(
classes=(...),
...
I put it in both places to be safe. It doesn't seem to complain about unused config fields.
A better error message (or more consistent config format) would be really good though.
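Putting that together, here is a sketch of the layout that worked for me (the paths and the single 'pupil' class are from my setup above, so treat them as placeholders):

```python
# configs/custom_dataset_ssd.py -- sketch of the working layout. The key change
# from the broken config is that `classes` is also set inside train.dataset,
# i.e. the dict wrapped by RepeatDataset in the SSD base config.
_base_ = 'ssd/ssd300_coco.py'

model = dict(bbox_head=dict(num_classes=1))

classes = ('pupil',)

data = dict(
    train=dict(
        classes=classes,  # kept here too, to be safe
        dataset=dict(
            classes=classes,
            ann_file='/coco/train/annotation_coco.json',
            img_prefix='/coco/train/')),
    val=dict(
        classes=classes,
        ann_file='/coco/val/annotation_coco.json',
        img_prefix='/coco/val/'),
    test=dict(
        classes=classes,
        ann_file='/coco/val/annotation_coco.json',
        img_prefix='/coco/val/'))
```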
Okay, I am switching to Detectron2.
I could get it working by following the custom dataset tutorial religiously and adding my custom dataset to ~/mmdetection/mmdet/datasets. It seems that the cfg object has changed a lot from previous versions, so it needs close attention.
With respect to adding my dataset to ~/mmdetection/mmdet/datasets, in addition to what is done in the custom dataset tutorial, I add it to ~/mmdetection/mmdet/datasets/__init__.py to export it properly, the same way the other datasets are exported there.
By the way, my datasets use the COCO format, for which I copy ~/mmdetection/mmdet/datasets/coco.py as a template, change its class name to that of my dataset, and set the CLASSES variable to the names of the 'categories' specified in my COCO annotation (JSON) files.
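A sketch of what that edit looks like (MyDataset and the class name are placeholders for your own dataset):

```python
# mmdet/datasets/my_dataset.py -- copied from coco.py as a template, as
# described above. The names in CLASSES must match the 'name' fields of the
# 'categories' entries in your COCO annotation JSON.
from .builder import DATASETS
from .coco import CocoDataset


@DATASETS.register_module()
class MyDataset(CocoDataset):
    CLASSES = ('pupil',)  # your own category names here

# Then, in mmdet/datasets/__init__.py, import MyDataset and append 'MyDataset'
# to the existing __all__ list so it is exported like the other datasets.
```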
I am getting the same error. Here is my config file for the SSD model:
```python
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
bbox_head=dict(num_classes=2),
)
classes = ('truck', 'coupler')
dataset_type = 'CocoDataset'
data_root = '/home/bita/mmdetection/configs/ssd_coupler/multilevel/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=0,
train=dict(
type=dataset_type,
ann_file=data_root + 'train/annotations_train_multi.json',
img_prefix=data_root + 'train/',
pipeline=train_pipeline,
),
val=dict(
type=dataset_type,
ann_file=data_root + 'val/annotations_val_multi.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'test/annotations_test_multi.json',
img_prefix=data_root + 'test/',
pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
```
the error traceback:
Traceback (most recent call last):
File "tools/train.py", line 181, in <module>
main()
File "tools/train.py", line 170, in main
train_detector(
File "/home/bita/mmdetection/mmdet/apis/train.py", line 150, in train_detector
runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
File "/home/bita/anaconda3/envs/open-mmlab/lib/python3.8/site-packages/mmcv/runner/epoch_based_runner.py", line 125, in run
epoch_runner(data_loaders[i], **kwargs)
File "/home/bita/anaconda3/envs/open-mmlab/lib/python3.8/site-packages/mmcv/runner/epoch_based_runner.py", line 47, in train
for i, data_batch in enumerate(self.data_loader):
File "/home/bita/anaconda3/envs/open-mmlab/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 435, in __next__
data = self._next_data()
File "/home/bita/anaconda3/envs/open-mmlab/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 474, in _next_data
index = self._next_index() # may raise StopIteration
File "/home/bita/anaconda3/envs/open-mmlab/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 427, in _next_index
return next(self._sampler_iter) # may raise StopIteration
File "/home/bita/anaconda3/envs/open-mmlab/lib/python3.8/site-packages/torch/utils/data/sampler.py", line 227, in __iter__
for idx in self.sampler:
File "/home/bita/mmdetection/mmdet/datasets/samplers/group_sampler.py", line 36, in __iter__
indices = np.concatenate(indices)
File "<__array_function__ internals>", line 5, in concatenate
ValueError: need at least one array to concatenate
Hi @ZwwWayne
I am also having the same issue with training after transforming my labeled custom data into COCO format with only one class.
Other threads with the same issue have been closed (e.g. #210, #2198, #3628), but none of the proposed solutions fix my error.
I am using /configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py.
Done (t=0.46s)
creating index...
index created!
2020-12-15 13:38:59,283 - mmdet - INFO - Start running, host: root@e6ed65f77dcc, work_dir: /content/drive/My Drive/mmdetection/work_dirs/faster_rcnn_r50_fpn_1x_coco
2020-12-15 13:38:59,283 - mmdet - INFO - workflow: [('train', 1)], max: 20 epochs
2020-12-15 13:38:59.488541: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1
Traceback (most recent call last):
File "tools/train.py", line 181, in
main()
File "tools/train.py", line 177, in main
meta=meta)
File "/usr/local/lib/python3.6/dist-packages/mmdet-2.7.0-py3.6.egg/mmdet/apis/train.py", line 150, in train_detector
runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
File "/usr/local/lib/python3.6/dist-packages/mmcv/runner/epoch_based_runner.py", line 125, in run
epoch_runner(data_loaders[i], **kwargs)
File "/usr/local/lib/python3.6/dist-packages/mmcv/runner/epoch_based_runner.py", line 47, in train
for i, data_batch in enumerate(self.data_loader):
File "/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py", line 352, in __iter__
return self._get_iterator()
File "/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py", line 294, in _get_iterator
return _MultiProcessingDataLoaderIter(self)
File "/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py", line 827, in __init__
self._reset(loader, first_iter=True)
File "/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py", line 857, in _reset
self._try_put_index()
File "/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py", line 1091, in _try_put_index
index = self._next_index()
File "/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py", line 427, in _next_index
return next(self._sampler_iter) # may raise StopIteration
File "/usr/local/lib/python3.6/dist-packages/torch/utils/data/sampler.py", line 227, in __iter__
for idx in self.sampler:
File "/usr/local/lib/python3.6/dist-packages/mmdet-2.7.0-py3.6.egg/mmdet/datasets/samplers/group_sampler.py", line 36, in __iter__
indices = np.concatenate(indices)
File "<__array_function__ internals>", line 6, in concatenate
ValueError: need at least one array to concatenate
I modified CLASSES in ~/mmdetection/mmdet/datasets/coco.py to fit my dataset and did not specify the classes in the config file. That worked. Then I hit another error, so I commented out the corresponding code, and it started training normally.
> I modified CLASSES in ~/mmdetection/mmdet/datasets/coco.py to fit my dataset and did not specify the classes in the config file. That worked. Then I hit another error, so I commented out the corresponding code, and it started training normally.

Please tell me how to modify it. Thank you very much.