# MMSegmentation-style training config dump for the PSCCNet text-forgery
# baseline run (positive balance scale 0.05, seed 4567).
work_dir = 'records/guoshoucai_auto_gen_ps_with_tianchi_psccnet_baseline_dct_balance_scale_0_05_1_0_15_epochs_cls_weight_1_5_more_negs_seed_4567'

# ---------------------------------------------------------------------------
# Dataset and pipelines
# ---------------------------------------------------------------------------
dataset_type = 'MaskSegDatasetv2'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
input_size = (512, 512)

train_pre_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations', binary=True, train=True, img_label_binary=True)
]
train_post_pipeline = [
    dict(type='SimpleResize', size=(512, 512)),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='Normalizev2',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg', 'img_label'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='SimpleResize', size=(512, 512)),
    dict(
        type='Normalizev2',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img'])
]

data = dict(
    samples_per_gpu=1,
    workers_per_gpu=4,
    train=dict(
        type='MaskSegDatasetv2',
        data_root='/mnt/disk1/data/image_forgery/text_forgery',
        ann_path='guoshoucai_auto_gen_ps_with_tianchi_1.txt',
        # The train pipeline is a pair of stages (pre-pipeline, post-pipeline),
        # expanded here exactly as in train_pre_pipeline/train_post_pipeline.
        pipeline=[
            [
                dict(type='LoadImageFromFile'),
                dict(
                    type='LoadAnnotations',
                    binary=True,
                    train=True,
                    img_label_binary=True)
            ],
            [
                dict(type='SimpleResize', size=(512, 512)),
                dict(type='RandomFlip', prob=0.5),
                dict(
                    type='Normalizev2',
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]),
                dict(type='DefaultFormatBundle'),
                dict(
                    type='Collect',
                    keys=['img', 'gt_semantic_seg', 'img_label'])
            ]
        ]),
    val=[
        dict(
            type='MaskSegDatasetv2',
            data_root='/mnt/disk1/data/image_forgery/text_forgery/guoshoucai_auto_gen/test_forged_with_ps',
            ann_path='test_1.txt',
            test_mode=True,
            pipeline=[
                dict(type='LoadImageFromFile'),
                dict(type='SimpleResize', size=(512, 512)),
                dict(
                    type='Normalizev2',
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]),
                dict(type='ImageToTensor', keys=['img']),
                dict(type='Collect', keys=['img'])
            ],
            dataset_name='guoshoucai_text',
            gt_seg_map_loader_cfg=dict(binary=True, img_label_binary=True)),
        dict(
            type='MaskSegDatasetv2',
            data_root='/mnt/disk1/data/image_forgery/text_forgery/tianchi_text_forgory',
            ann_path='val.txt',
            test_mode=True,
            pipeline=[
                dict(type='LoadImageFromFile'),
                dict(type='SimpleResize', size=(512, 512)),
                dict(
                    type='Normalizev2',
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]),
                dict(type='ImageToTensor', keys=['img']),
                dict(type='Collect', keys=['img'])
            ],
            dataset_name='tianchi',
            gt_seg_map_loader_cfg=dict(binary=True, img_label_binary=True))
    ])

# ---------------------------------------------------------------------------
# Model
# ---------------------------------------------------------------------------
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='PSCCDetector',
    base_model=dict(
        type='PSCCNet',
        crop_size=(512, 512),
        pretrained='/home/yangwu/.cache/torch/checkpoints/hrnet_w18_small_v2.pth'),
    train_cfg=dict(
        seg_loss=dict(type='BCELoss', reduction='none'),
        seg_loss_weights=(1.0, 1.0),
        mask_loss_weights=(1.0, 1.0, 1.0, 1.0),
        cls_loss=dict(
            type='CrossEntropyLoss',
            use_sigmoid=False,
            class_weight=(1.0, 1.0)),
        p_balance_scale=0.05,
        n_balance_scale=1.0),
    test_cfg=dict())

# ---------------------------------------------------------------------------
# Schedule and runtime
# ---------------------------------------------------------------------------
optimizer = dict(type='Adam', lr=0.0001, weight_decay=1e-05)
optimizer_config = dict()
lr_config = dict(policy='CosineAnnealing', min_lr=1e-07, by_epoch=False)
runner = dict(type='IterBasedRunner', max_iters=121960)
checkpoint_config = dict(by_epoch=False, interval=4065, max_keep_ckpts=1)
evaluation = dict(
    interval=4065,
    metric='mFscore',
    pre_eval=True,
    mean=False,
    thresh=0.5,
    img_thresh=0.5)
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook', by_epoch=False),
        dict(type='TensorboardLoggerHook')
    ])
ext_test_dataset = ['CASIA1']
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
find_unused_parameters = False
auto_resume = False
gpu_ids = range(0, 4)
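
# ---------------------------------------------------------------------------
# Usage sketch (not part of the original dump). This shows how a config dump
# like the one above is typically consumed, assuming the mmcv (<2.0) `Config`
# API that MMSegmentation-style codebases build on; the config path below is a
# hypothetical placeholder. The `__main__` guard keeps this block inert when
# mmcv execs the file as a config module.
if __name__ == '__main__':
    from mmcv import Config

    cfg = Config.fromfile('path/to/this_config.py')  # hypothetical path
    print(cfg.model.type)                # -> 'PSCCDetector'
    print(cfg.runner.max_iters)          # -> 121960
    print(cfg.data.val[1].dataset_name)  # -> 'tianchi'

# Assuming this repo follows the standard MMSegmentation tools/ layout, a
# 4-GPU run matching `gpu_ids = range(0, 4)` and the NCCL dist_params would be
# launched with something like:
#   bash tools/dist_train.sh path/to/this_config.py 4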