 import os
-from solaris.eval.base import Evaluator
-import solaris
+
 import geopandas as gpd
 import pandas as pd
 
+import solaris
+from solaris.eval.base import Evaluator
+
 data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../data/"))
 
+
 class TestEvaluator(object):
     def test_init_from_file(self):
         """Test instantiation of an Evaluator instance from a file."""
-        base_instance = Evaluator(os.path.join(data_dir,
-                                               'gt.geojson'))
+        base_instance = Evaluator(os.path.join(data_dir, "gt.geojson"))
         gdf = solaris.data.gt_gdf()
         assert base_instance.ground_truth_sindex.bounds == gdf.sindex.bounds
         assert base_instance.proposal_GDF.equals(gpd.GeoDataFrame([]))
         assert base_instance.ground_truth_GDF.equals(
-            base_instance.ground_truth_GDF_Edit)
+            base_instance.ground_truth_GDF_Edit
+        )
 
     def test_init_from_gdf(self):
         """Test instantiation of an Evaluator from a pre-loaded GeoDataFrame."""
@@ -24,72 +27,76 @@ def test_init_from_gdf(self):
         assert base_instance.ground_truth_sindex.bounds == gdf.sindex.bounds
         assert base_instance.proposal_GDF.equals(gpd.GeoDataFrame([]))
         assert base_instance.ground_truth_GDF.equals(
-            base_instance.ground_truth_GDF_Edit)
+            base_instance.ground_truth_GDF_Edit
+        )
 
     def test_init_empty_geojson(self):
         """Test instantiation of Evaluator with an empty geojson file."""
-        base_instance = Evaluator(os.path.join(data_dir,
-                                               'empty.geojson'))
-        expected_gdf = gpd.GeoDataFrame({'sindex': [],
-                                         'condition': [],
-                                         'geometry': []})
+        base_instance = Evaluator(os.path.join(data_dir, "empty.geojson"))
+        expected_gdf = gpd.GeoDataFrame({"sindex": [], "condition": [], "geometry": []})
         assert base_instance.ground_truth_GDF.equals(expected_gdf)
 
     def test_score_proposals(self):
         """Test reading in a proposal GDF from a geojson and scoring it."""
-        eb = Evaluator(os.path.join(data_dir, 'gt.geojson'))
-        eb.load_proposal(os.path.join(data_dir, 'pred.geojson'))
+        eb = Evaluator(os.path.join(data_dir, "gt.geojson"))
+        eb.load_proposal(os.path.join(data_dir, "pred.geojson"))
         pred_gdf = solaris.data.pred_gdf()
         assert eb.proposal_GDF.iloc[:, 0:3].sort_index().equals(pred_gdf)
-        expected_score = [{'class_id': 'all',
-                           'iou_field': 'iou_score_all',
-                           'TruePos': 8,
-                           'FalsePos': 20,
-                           'FalseNeg': 20,
-                           'Precision': 0.2857142857142857,
-                           'Recall': 0.2857142857142857,
-                           'F1Score': 0.2857142857142857}]
+        expected_score = [
+            {
+                "class_id": "all",
+                "iou_field": "iou_score_all",
+                "TruePos": 8,
+                "FalsePos": 20,
+                "FalseNeg": 20,
+                "Precision": 0.2857142857142857,
+                "Recall": 0.2857142857142857,
+                "F1Score": 0.2857142857142857,
+            }
+        ]
         scores = eb.eval_iou(calculate_class_scores=False)
         assert scores == expected_score
 
     def test_score_proposals_return_gdfs(self):
-        eb = Evaluator(os.path.join(data_dir, 'gt.geojson'))
-        eb.load_proposal(os.path.join(data_dir, 'pred.geojson'))
-        expected_score = [{'class_id': 'all',
-                           'iou_field': 'iou_score_all',
-                           'TruePos': 8,
-                           'FalsePos': 20,
-                           'FalseNeg': 20,
-                           'Precision': 0.2857142857142857,
-                           'Recall': 0.2857142857142857,
-                           'F1Score': 0.2857142857142857}]
+        eb = Evaluator(os.path.join(data_dir, "gt.geojson"))
+        eb.load_proposal(os.path.join(data_dir, "pred.geojson"))
+        expected_score = [
+            {
+                "class_id": "all",
+                "iou_field": "iou_score_all",
+                "TruePos": 8,
+                "FalsePos": 20,
+                "FalseNeg": 20,
+                "Precision": 0.2857142857142857,
+                "Recall": 0.2857142857142857,
+                "F1Score": 0.2857142857142857,
+            }
+        ]
         scores, tp_gdf, fn_gdf, fp_gdf = eb.eval_iou_return_GDFs(
-            calculate_class_scores=False)
+            calculate_class_scores=False
+        )
         assert scores == expected_score
-        assert len(tp_gdf) == expected_score[0]['TruePos']
-        assert len(fp_gdf) == expected_score[0]['FalsePos']
-        assert len(fn_gdf) == expected_score[0]['FalseNeg']
+        assert len(tp_gdf) == expected_score[0]["TruePos"]
+        assert len(fp_gdf) == expected_score[0]["FalsePos"]
+        assert len(fn_gdf) == expected_score[0]["FalseNeg"]
 
     def test_iou_by_building(self):
         """Test output of ground truth table with per-building IoU scores"""
         data_folder = data_dir
-        path_truth = os.path.join(data_folder, 'SN2_sample_truth.csv')
-        path_pred = os.path.join(data_folder, 'SN2_sample_preds.csv')
-        path_ious = os.path.join(data_folder, 'SN2_sample_iou_by_building.csv')
-        path_temp = './temp.pd'
+        path_truth = os.path.join(data_folder, "SN2_sample_truth.csv")
+        path_pred = os.path.join(data_folder, "SN2_sample_preds.csv")
+        path_ious = os.path.join(data_folder, "SN2_sample_iou_by_building.csv")
+        path_temp = "./temp.pd"
         eb = Evaluator(path_truth)
-        eb.load_proposal(path_pred, conf_field_list=['Confidence'],
-                         proposalCSV=True)
-        eb.eval_iou_spacenet_csv(miniou=0.5, imageIDField='ImageId',
-                                 min_area=20)
+        eb.load_proposal(path_pred, conf_field_list=["Confidence"], proposalCSV=True)
+        eb.eval_iou_spacenet_csv(miniou=0.5, imageIDField="ImageId", min_area=20)
         output = eb.get_iou_by_building()
         result_actual = pd.DataFrame(output)
-        result_actual.sort_values(by=['ImageId', 'BuildingId'], inplace=True)
-        ious_actual = list(result_actual['iou_score'])
+        result_actual.sort_values(by=["ImageId", "BuildingId"], inplace=True)
+        ious_actual = list(result_actual["iou_score"])
         result_expected = pd.read_csv(path_ious, index_col=0)
-        result_expected.sort_values(by=['ImageId', 'BuildingId'], inplace=True)
-        ious_expected = list(result_expected['iou_score'])
-        maxdifference = max([abs(x - y) for x, y in zip(ious_actual,
-                                                        ious_expected)])
-        epsilon = 1E-9
+        result_expected.sort_values(by=["ImageId", "BuildingId"], inplace=True)
+        ious_expected = list(result_expected["iou_score"])
+        maxdifference = max([abs(x - y) for x, y in zip(ious_actual, ious_expected)])
+        epsilon = 1e-9
         assert maxdifference < epsilon