+#!/usr/bin/env python
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 # Input arguments and options
 parser = argparse.ArgumentParser()
 # Input paths
-parser.add_argument('--model', type=str, default='',
-                help='path to model to evaluate')
+parser.add_argument('--model', type=str, required=True,
+                    help='path to model to evaluate')
 parser.add_argument('--cnn_model', type=str, default='resnet101',
-                help='resnet101, resnet152')
-parser.add_argument('--infos_path', type=str, default='',
-                help='path to infos to evaluate')
+                    help='resnet101, resnet152')
+parser.add_argument('--infos_path', type=str, required=True,
+                    help='path to infos to evaluate')
 # Basic options
 parser.add_argument('--batch_size', type=int, default=0,
-                help='if > 0 then overrule, otherwise load from checkpoint.')
+                    help='if > 0 then overrule, otherwise load from checkpoint.')
 parser.add_argument('--num_images', type=int, default=-1,
-                help='how many images to use when periodically evaluating the loss? (-1 = all)')
+                    help='how many images to use when periodically evaluating the loss? (-1 = all)')
 parser.add_argument('--language_eval', type=int, default=0,
-                help='Evaluate language as well (1 = yes, 0 = no)? BLEU/CIDEr/METEOR/ROUGE_L? requires coco-caption code from Github.')
+                    help='Evaluate language as well (1 = yes, 0 = no)? BLEU/CIDEr/METEOR/ROUGE_L? requires coco-caption code from Github.')
 parser.add_argument('--dump_images', type=int, default=1,
-                help='Dump images into vis/imgs folder for vis? (1=yes,0=no)')
+                    help='Dump images into vis/imgs folder for vis? (1=yes,0=no)')
 parser.add_argument('--dump_json', type=int, default=1,
-                help='Dump json with predictions into vis folder? (1=yes,0=no)')
+                    help='Dump json with predictions into vis folder? (1=yes,0=no)')
 parser.add_argument('--dump_path', type=int, default=0,
-                help='Write image paths along with predictions into vis json? (1=yes,0=no)')
+                    help='Write image paths along with predictions into vis json? (1=yes,0=no)')
 # Sampling options
 parser.add_argument('--sample_max', type=int, default=1,
-                help='1 = sample argmax words. 0 = sample from distributions.')
+                    help='1 = sample argmax words. 0 = sample from distributions.')
 parser.add_argument('--beam_size', type=int, default=2,
-                help='used when sample_max = 1, indicates number of beams in beam search. Usually 2 or 3 works well. More is not better. Set this to 1 for faster runtime but a bit worse performance.')
+                    help='used when sample_max = 1, indicates number of beams in beam search. Usually 2 or 3 works well. More is not better. Set this to 1 for faster runtime but a bit worse performance.')
 parser.add_argument('--temperature', type=float, default=1.0,
-                help='temperature when sampling from distributions (i.e. when sample_max = 0). Lower = "safer" predictions.')
+                    help='temperature when sampling from distributions (i.e. when sample_max = 0). Lower = "safer" predictions.')
 # For evaluation on a folder of images:
-parser.add_argument('--image_folder', type=str, default='',
-                help='If this is nonempty then will predict on the images in this folder path')
-parser.add_argument('--image_root', type=str, default='',
-                help='In case the image paths have to be preprended with a root path to an image folder')
+parser.add_argument('--image_folder', type=str, default='',
+                    help='If this is nonempty then will predict on the images in this folder path')
+parser.add_argument('--image_root', type=str, default='',
+                    help='In case the image paths have to be prepended with a root path to an image folder')
 # For evaluation on MSCOCO images from some split:
 parser.add_argument('--input_fc_dir', type=str, default='',
-                help='path to the h5file containing the preprocessed dataset')
+                    help='path to the h5file containing the preprocessed dataset')
 parser.add_argument('--input_att_dir', type=str, default='',
-                help='path to the h5file containing the preprocessed dataset')
+                    help='path to the h5file containing the preprocessed dataset')
 parser.add_argument('--input_label_h5', type=str, default='',
-                help='path to the h5file containing the preprocessed dataset')
-parser.add_argument('--input_json', type=str, default='',
-                help='path to the json file containing additional info and vocab. empty = fetch from model checkpoint.')
-parser.add_argument('--split', type=str, default='test',
-                help='if running on MSCOCO images, which split to use: val|test|train')
-parser.add_argument('--coco_json', type=str, default='',
-                help='if nonempty then use this file in DataLoaderRaw (see docs there). Used only in MSCOCO test evaluation, where we have a specific json file of only test set images.')
+                    help='path to the h5file containing the preprocessed dataset')
+parser.add_argument('--input_json', type=str, default='',
+                    help='path to the json file containing additional info and vocab. empty = fetch from model checkpoint.')
+parser.add_argument('--split', type=str, default='test',
+                    help='if running on MSCOCO images, which split to use: val|test|train')
+parser.add_argument('--coco_json', type=str, default='',
+                    help='if nonempty then use this file in DataLoaderRaw (see docs there). Used only in MSCOCO test evaluation, where we have a specific json file of only test set images.')
 # misc
-parser.add_argument('--id', type=str, default='',
-                help='an id identifying this run/job. used only if language_eval = 1 for appending to intermediate files')
+parser.add_argument('--id', type=str, default='',
+                    help='an id identifying this run/job. used only if language_eval = 1 for appending to intermediate files')

 opt = parser.parse_args()

 # Load infos
-with open(opt.infos_path) as f:
+with open(opt.infos_path, 'rb') as f:
     infos = cPickle.load(f)
 # override and collect parameters

 # Create the Data Loader instance
 if len(opt.image_folder) == 0:
-  loader = DataLoader(opt)
+    loader = DataLoader(opt)
 else:
-  loader = DataLoaderRaw({'folder_path': opt.image_folder,
+    loader = DataLoaderRaw({'folder_path': opt.image_folder,
                             'coco_json': opt.coco_json,
                             'batch_size': opt.batch_size,
                             'cnn_model': opt.cnn_model})

 # Set sample options
-loss, split_predictions, lang_stats = eval_utils.eval_split(model, crit, loader,
+loss, split_predictions, lang_stats = eval_utils.eval_split(
+    model, crit, loader,
     vars(opt))
 print('loss: ', loss)
 if lang_stats:
-  print(lang_stats)
+    print(lang_stats)

 if opt.dump_json == 1:
     # dump the json