To run my test.py on a CPU server, I changed the `init_detector` default argument from _device = 'cuda:0'_ to _'cpu'_ and added _map_location = 'cpu'_ to the _load_checkpoint()_ call, both inside the `init_detector` method of inference.py. I also set up a CPU-only PyTorch virtualenv to run it.
The error is as below:
Traceback (most recent call last):
File "test.py", line 2, in
from mmdet.apis import init_detector, inference_detector, show_result
File "/home/tensor/mmdetection/mmdet/apis/__init__.py", line 3, in
from .inference import init_detector, inference_detector, show_result
File "/home/tensor/mmdetection/mmdet/apis/inference.py", line 9, in
from mmdet.core import get_classes
File "/home/tensor/mmdetection/mmdet/core/__init__.py", line 6, in
from .post_processing import * # noqa: F401, F403
File "/home/tensor/mmdetection/mmdet/core/post_processing/__init__.py", line 1, in
from .bbox_nms import multiclass_nms
File "/home/tensor/mmdetection/mmdet/core/post_processing/bbox_nms.py", line 3, in
from mmdet.ops.nms import nms_wrapper
File "/home/tensor/mmdetection/mmdet/ops/__init__.py", line 7, in
from .roi_align import RoIAlign, roi_align
File "/home/tensor/mmdetection/mmdet/ops/roi_align/__init__.py", line 1, in
from .functions.roi_align import roi_align
File "/home/tensor/mmdetection/mmdet/ops/roi_align/functions/roi_align.py", line 3, in
from .. import roi_align_cuda
ImportError: /home/tensor/mmdetection/mmdet/ops/roi_align/roi_align_cuda.cpython-36m-x86_64-linux-gnu.so: undefined symbol: __THCudaCheck
Note that the code I modified has not even been executed yet — the error occurs at import time. Is there anything else that needs to be modified to run on a CPU-only machine?
This my test.py
#coding:utf-8
from mmdet.apis import init_detector, inference_detector, show_result
import os
from concat_video import concat
from split_video import split
import numpy as np

config_file = 'configs/retinanet_x101_32x4d_fpn_1x.py'
checkpoint_file = 'work_dirs/latest.pth'

# init_detector defaults to device='cpu' in the patched inference.py.
model = init_detector(config_file, checkpoint_file)

# Frames are named 1.jpg, 2.jpg, ... in pic_output_; rebuild the ordered
# path list (os.listdir gives no ordering guarantee, so index by count).
imgs = os.listdir('pic_output_')
imgs = ['pic_output_/' + str(i + 1) + '.jpg' for i in range(len(imgs))]

for i, result in enumerate(inference_detector(model, imgs)):
    print(f'{i+1}/{len(imgs)}')
    # Bug fix: pass model.CLASSES directly. The original wrapped it in
    # another list ([model.CLASSES]), which makes class_names a 1-element
    # list so label indices no longer line up with class names inside
    # show_result / imshow_det_bboxes.
    show_result(imgs[i], result, model.CLASSES,
                out_file='video_output_/{}.jpg'.format(i + 1))
concat()
This is the inference.py.
import warnings
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import torch
from mmcv.runner import load_checkpoint
from mmdet.core import get_classes
from mmdet.datasets import to_tensor
from mmdet.datasets.transforms import ImageTransform
from mmdet.models import build_detector
def init_detector(config, checkpoint=None, device='cpu'):
    """Build a detector from a config file and optionally load weights.

    Args:
        config (str or :obj:`mmcv.Config`): Config file path or the config
            object.
        checkpoint (str, optional): Checkpoint path. If left as None, the
            model will not load any weights.
        device (str): Device to move the model to, e.g. 'cpu' or 'cuda:0'.

    Returns:
        nn.Module: The constructed detector, in eval mode, on `device`.
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    if not isinstance(config, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        'but got {}'.format(type(config)))
    # Drop the pretrained-backbone URL so no download/load is attempted.
    config.model.pretrained = None
    model = build_detector(config.model, test_cfg=config.test_cfg)
    if checkpoint is not None:
        # Load weights onto CPU first; the model is moved to `device` below.
        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
        if 'CLASSES' in checkpoint['meta']:
            model.CLASSES = checkpoint['meta']['CLASSES']
        else:
            warnings.warn('Class names are not saved in the checkpoint\'s '
                          'meta data, use COCO classes by default.')
            model.CLASSES = get_classes('coco')
    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()
    return model
def inference_detector(model, imgs):
    """Inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        imgs (str/ndarray or list[str/ndarray]): Either image files or loaded
            images.

    Returns:
        For a single image (non-list input) the detection result is returned
        directly; for a list of images a lazy generator of per-image results
        is returned.
    """
    cfg = model.cfg
    transform = ImageTransform(
        size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg)
    # Run on whatever device the model parameters currently live on.
    device = next(model.parameters()).device
    if isinstance(imgs, list):
        return _inference_generator(model, imgs, transform, device)
    return _inference_single(model, imgs, transform, device)
def _prepare_data(img, img_transform, cfg, device):
    """Transform a loaded image into the input dict the detector expects."""
    original_shape = img.shape
    img, img_shape, pad_shape, scale_factor = img_transform(
        img,
        scale=cfg.data.test.img_scale,
        keep_ratio=cfg.data.test.get('resize_keep_ratio', True))
    # Add the batch dimension and move the tensor to the model's device.
    img = to_tensor(img).to(device).unsqueeze(0)
    meta = dict(
        ori_shape=original_shape,
        img_shape=img_shape,
        pad_shape=pad_shape,
        scale_factor=scale_factor,
        flip=False)
    # The model expects lists (one entry per augmentation) of batches.
    return dict(img=[img], img_meta=[[meta]])
def _inference_single(model, img, img_transform, device):
    """Run the detector on a single image (file path or loaded array)."""
    data = _prepare_data(mmcv.imread(img), img_transform, model.cfg, device)
    with torch.no_grad():
        return model(return_loss=False, rescale=True, **data)
def _inference_generator(model, imgs, img_transform, device):
    """Lazily yield one detection result per image in `imgs`."""
    for image in imgs:
        yield _inference_single(model, image, img_transform, device)
# TODO: merge this method with the one in BaseDetector
def show_result(img, result, class_names, score_thr=0.75, out_file=None):
    """Visualize the detection results on the image.

    Args:
        img (str or np.ndarray): Image filename or loaded image.
        result (tuple[list] or list): The detection result, can be either
            (bbox, segm) or just bbox.
        class_names (list[str] or tuple[str]): A list of class names.
        score_thr (float): The threshold to visualize the bboxes and masks.
        out_file (str, optional): If specified, the visualization result will
            be written to the out file instead of shown in a window.
    """
    assert isinstance(class_names, (tuple, list))
    img = mmcv.imread(img)
    if isinstance(result, tuple):
        bbox_result, segm_result = result
    else:
        bbox_result, segm_result = result, None
    bboxes = np.vstack(bbox_result)
    # draw segmentation masks
    if segm_result is not None:
        segms = mmcv.concat_list(segm_result)
        inds = np.where(bboxes[:, -1] > score_thr)[0]
        for i in inds:
            color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            # Fix: `np.bool` was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin `bool` is the equivalent supported spelling.
            mask = maskUtils.decode(segms[i]).astype(bool)
            img[mask] = img[mask] * 0.5 + color_mask * 0.5
    # draw bounding boxes: label each box with its class index
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_result)
    ]
    labels = np.concatenate(labels)
    mmcv.imshow_det_bboxes(
        img.copy(),
        bboxes,
        labels,
        class_names=class_names,
        score_thr=score_thr,
        show=out_file is None,  # only open a window when not writing a file
        out_file=out_file)
BTW, my test.py runs correctly on a GPU machine in a cuda-pytorch environment.
Since the custom ops are compiled with CUDA support, they do not work on the CPU-only environment. We will consider adding support for that.
thanks for the reply
Has support for a CPU-only environment been implemented yet?
Most helpful comment
Since the custom ops are compiled with CUDA support, they do not work on the CPU-only environment. We will consider adding support for that.