
Note

You are reading the documentation for MMEditing 0.x, which will be deprecated by the end of 2022. We recommend you upgrade to MMEditing 1.0 to enjoy the new features and better performance brought by OpenMMLab 2.0. Check out the changelog, code, and documentation of MMEditing 1.0 for more details.

Source code for mmedit.datasets.base_matting_dataset

# Copyright (c) OpenMMLab. All rights reserved.
from collections import defaultdict

from .base_dataset import BaseDataset
from .registry import DATASETS


@DATASETS.register_module()
class BaseMattingDataset(BaseDataset):
    """Base image matting dataset."""

    def __init__(self, ann_file, pipeline, data_prefix=None, test_mode=False):
        super().__init__(pipeline, test_mode)
        self.ann_file = str(ann_file)
        self.data_prefix = str(data_prefix)
        self.data_infos = self.load_annotations()

    def evaluate(self, results, logger=None):
        """Evaluating with different metrics.

        Args:
            results (list[tuple]): The output of forward_test() of the model.

        Return:
            dict: Evaluation results dict.
        """
        if not isinstance(results, list):
            raise TypeError(
                f'results must be a list, but got {type(results)}')
        assert len(results) == len(self), (
            'The length of results is not equal to the '
            f'dataset len: {len(results)} != {len(self)}')

        results = [res['eval_result'] for res in results]  # a list of dict
        eval_result = defaultdict(list)  # a dict of list
        for res in results:
            for metric, val in res.items():
                eval_result[metric].append(val)
        for metric, val_list in eval_result.items():
            assert len(val_list) == len(self), (
                f'Length of evaluation result of {metric} is {len(val_list)}, '
                f'should be {len(self)}')

        # average the results
        eval_result = {
            metric: sum(values) / len(self)
            for metric, values in eval_result.items()
        }

        return eval_result
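
The following is a minimal usage sketch, not part of MMEditing itself: the subclass name ToyMattingDataset, its placeholder annotations, and the metric values are hypothetical, chosen only to illustrate how a concrete dataset provides load_annotations() and how evaluate() averages the per-sample 'eval_result' dicts produced by a model's forward_test(). It assumes MMEditing 0.x is installed and that an empty pipeline list is acceptable to BaseDataset.

# Hypothetical subclass for illustration only.
from mmedit.datasets.base_matting_dataset import BaseMattingDataset


class ToyMattingDataset(BaseMattingDataset):
    """Toy matting dataset that fakes two annotation entries."""

    def load_annotations(self):
        # A real subclass would parse ``self.ann_file``; here we return
        # placeholder entries so the dataset reports a length of 2.
        return [dict(alpha_path='a1.png'), dict(alpha_path='a2.png')]


dataset = ToyMattingDataset(ann_file='ann.txt', pipeline=[], test_mode=True)

# ``evaluate`` expects one dict per sample under the key 'eval_result'
# and averages each metric over the whole dataset.
results = [
    dict(eval_result=dict(SAD=40.0, GRAD=20.0)),
    dict(eval_result=dict(SAD=60.0, GRAD=30.0)),
]
print(dataset.evaluate(results))  # {'SAD': 50.0, 'GRAD': 25.0}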