@@ -102,6 +102,39 @@ mod blocking_and_async_io {
             )?;
             Ok(())
         }
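+        // For every ref mapping that produced a local tracking branch, resolve the fetched tip to an
+        // object, point HEAD at it, and then verify the object database contents.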
+        fn check_fetch_output(
+            repo: &gix::Repository,
+            out: gix::remote::fetch::Outcome,
+            expected_count: usize,
+        ) -> gix_testtools::Result {
+            for local_tracking_branch_name in out.ref_map.mappings.into_iter().filter_map(|m| m.local) {
+                let r = repo.find_reference(&local_tracking_branch_name)?;
+                r.id()
+                    .object()
+                    .expect("object should be present after fetching, triggering pack refreshes works");
+                repo.head_ref()?.unwrap().set_target_id(r.id(), "post fetch")?;
+            }
+            check_odb_accessability(repo, expected_count)?;
+            Ok(())
+        }
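+        // Walk the entire object database, skip duplicate ids that can occur across packs, and assert
+        // that each unique object is readable and that their count matches the expectation.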
+        fn check_odb_accessability(repo: &gix::Repository, expected_count: usize) -> gix_testtools::Result {
+            let mut count_unique = 0;
+            // TODO: somehow there is a lot of duplication when receiving objects.
+            let mut seen = gix_hashtable::HashSet::default();
+            for id in repo.objects.iter()? {
+                let id = id?;
+                if !seen.insert(id) {
+                    continue;
+                }
+                let _obj = repo.find_object(id)?;
+                count_unique += 1;
+            }
+            assert_eq!(
+                count_unique, expected_count,
+                "Each round we receive exactly one commit, effectively"
+            );
+            Ok(())
+        }
         for max_packs in 1..=3 {
             let remote_dir = tempfile::tempdir()?;
             let mut remote_repo = gix::init_bare(remote_dir.path())?;
@@ -128,25 +161,33 @@ mod blocking_and_async_io {
                 Fetch,
             )
             .expect("remote is configured after clone")?;
-            for _round_to_create_pack in 1..12 {
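+            // `slots` is used further below to reopen the repository after a "slotmap too small" failure;
+            // `AsNeededByDiskState` sizes the slotmap dynamically based on the packs present on disk.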
+            let minimum_slots = 5;
+            let slots = Slots::AsNeededByDiskState {
+                multiplier: 1.1,
+                minimum: minimum_slots,
+            };
+            let one_more_than_minimum = minimum_slots + 1;
+            for round_to_create_pack in 1..one_more_than_minimum {
+                let expected_object_count = round_to_create_pack + 1 + 1 /* first commit + tree */;
                 create_empty_commit(&remote_repo)?;
                 match remote
                     .connect(Fetch)?
                     .prepare_fetch(gix::progress::Discard, Default::default())?
                     .receive(gix::progress::Discard, &IS_INTERRUPTED)
                 {
-                    Ok(out) => {
-                        for local_tracking_branch_name in out.ref_map.mappings.into_iter().filter_map(|m| m.local) {
-                            let r = local_repo.find_reference(&local_tracking_branch_name)?;
-                            r.id()
-                                .object()
-                                .expect("object should be present after fetching, triggering pack refreshes works");
-                            local_repo.head_ref()?.unwrap().set_target_id(r.id(), "post fetch")?;
-                        }
+                    Ok(out) => check_fetch_output(&local_repo, out, expected_object_count)?,
+                    Err(err) => {
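+                        // With a slotmap that is too small for the growing number of packs, fetching fails
+                        // with a stable error message which is asserted on below.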
+                        assert!(err
+                            .to_string()
+                            .starts_with("The slotmap turned out to be too small with "));
+                        // But opening a new repo will always be able to read all objects
+                        // as it dynamically sizes the otherwise static slotmap.
+                        let local_repo = gix::open_opts(
+                            local_repo.path(),
+                            gix::open::Options::isolated().object_store_slots(slots),
+                        )?;
+                        check_odb_accessability(&local_repo, expected_object_count)?;
                     }
-                    Err(err) => assert!(err
-                        .to_string()
-                        .starts_with("The slotmap turned out to be too small with ")),
                 }
             }
         }