 #define SELECT_NANOSLEEP 1
 #define SELECT_CLOCK_NANOSLEEP 0

-static inline int select_nanosleep(int selection, clockid_t clock_id, int flags,
-                                   const struct timespec *rqtp, struct timespec *rmtp)
-{
-        if (selection == SELECT_NANOSLEEP) {
-                return nanosleep(rqtp, rmtp);
-        }
-        return clock_nanosleep(clock_id, flags, rqtp, rmtp);
-}
-
-static inline uint64_t cycle_get_64(void)
-{
-        if (IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
-                return k_cycle_get_64();
-        } else {
-                return k_cycle_get_32();
-        }
-}
+void common_lower_bound_check(int selection, clockid_t clock_id, int flags, const uint32_t s,
+                              uint32_t ns);
+int select_nanosleep(int selection, clockid_t clock_id, int flags, const struct timespec *rqtp,
+                     struct timespec *rmtp);

 static void common_errors(int selection, clockid_t clock_id, int flags)
 {
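Because the helpers are no longer static inline, a single out-of-line definition presumably now lives in a shared helper source used by the POSIX timer tests. A minimal sketch of what that shared definition of select_nanosleep() could look like is shown below; the file name, include set, and placement of the SELECT_* constants are assumptions, and the body simply mirrors the inline version removed above.

/* Hypothetical shared helper source (name and includes are assumptions). */
#include <time.h>

#include <zephyr/kernel.h>

/* Assumed to move to a shared header alongside the declarations above. */
#define SELECT_NANOSLEEP       1
#define SELECT_CLOCK_NANOSLEEP 0

/* Dispatch to nanosleep() or clock_nanosleep() depending on `selection`. */
int select_nanosleep(int selection, clockid_t clock_id, int flags, const struct timespec *rqtp,
                     struct timespec *rmtp)
{
        if (selection == SELECT_NANOSLEEP) {
                return nanosleep(rqtp, rmtp);
        }
        return clock_nanosleep(clock_id, flags, rqtp, rmtp);
}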
@@ -129,77 +116,6 @@ ZTEST(posix_timers, test_clock_nanosleep_errors_errno)
         zassert_equal(rem.tv_nsec, 0, "actual: %d expected: %d", rem.tv_nsec, 0);
 }

-/**
- * @brief Check that a call to nanosleep has yielded execution for some minimum time.
- *
- * Check that the actual time slept is >= the total time specified by @p s (in seconds) and
- * @p ns (in nanoseconds).
- *
- * @note The time specified by @p s and @p ns is assumed to be absolute (i.e. a time-point)
- * when @p selection is set to @ref SELECT_CLOCK_NANOSLEEP. The time is assumed to be relative
- * when @p selection is set to @ref SELECT_NANOSLEEP.
- *
- * @param selection Either @ref SELECT_CLOCK_NANOSLEEP or @ref SELECT_NANOSLEEP
- * @param clock_id The clock to test (e.g. @ref CLOCK_MONOTONIC or @ref CLOCK_REALTIME)
- * @param flags Flags to pass to @ref clock_nanosleep
- * @param s Partial lower bound for yielded time (in seconds)
- * @param ns Partial lower bound for yielded time (in nanoseconds)
- */
-static void common_lower_bound_check(int selection, clockid_t clock_id, int flags, const uint32_t s,
-                                     uint32_t ns)
-{
-        int r;
-        uint64_t actual_ns = 0;
-        uint64_t exp_ns;
-        uint64_t now;
-        uint64_t then;
-        struct timespec rem = {0, 0};
-        struct timespec req = {s, ns};
-
-        errno = 0;
-        then = cycle_get_64();
-        r = select_nanosleep(selection, clock_id, flags, &req, &rem);
-        now = cycle_get_64();
-
-        zassert_equal(r, 0, "actual: %d expected: %d", r, 0);
-        zassert_equal(errno, 0, "actual: %d expected: %d", errno, 0);
-        zassert_equal(req.tv_sec, s, "actual: %d expected: %d", req.tv_sec, s);
-        zassert_equal(req.tv_nsec, ns, "actual: %d expected: %d", req.tv_nsec, ns);
-        zassert_equal(rem.tv_sec, 0, "actual: %d expected: %d", rem.tv_sec, 0);
-        zassert_equal(rem.tv_nsec, 0, "actual: %d expected: %d", rem.tv_nsec, 0);
-
-        switch (selection) {
-        case SELECT_NANOSLEEP:
-                /* exp_ns and actual_ns are relative (i.e. durations) */
-                actual_ns = k_cyc_to_ns_ceil64(now - then);
-                break;
-        case SELECT_CLOCK_NANOSLEEP:
-                /* exp_ns and actual_ns are absolute (i.e. time-points) */
-                actual_ns = k_cyc_to_ns_ceil64(now);
-                break;
-        default:
-                zassert_unreachable();
-                break;
-        }
-
-        exp_ns = (uint64_t)s * NSEC_PER_SEC + ns;
-        /* round up to the nearest microsecond for k_busy_wait() */
-        exp_ns = DIV_ROUND_UP(exp_ns, NSEC_PER_USEC) * NSEC_PER_USEC;
-
-        /* The comparison may be incorrect if a counter wrap has happened. On ARC HSDK platforms
-         * the counter clock frequency is high (500 MHz or 1 GHz), so a counter wrap is quite
-         * likely if we wait long enough. Since some test cases wait for more than 1 second,
-         * there is a significant chance of a false-positive assertion failure.
-         * TODO: switch the test to k_cycle_get_64() where available.
-         */
-#if !defined(CONFIG_SOC_ARC_HSDK) && !defined(CONFIG_SOC_ARC_HSDK4XD)
-        /* lower bounds check */
-        zassert_true(actual_ns >= exp_ns, "actual: %llu expected: %llu", actual_ns, exp_ns);
-#endif
-
-        /* TODO: Upper bounds check when hr timers are available */
-}
-
 ZTEST(posix_timers, test_nanosleep_execution)
 {
         /* sleep for 1ns */
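The helper removed above notes that the lower-bound comparison can break when a 32-bit cycle counter wraps: at 500 MHz the counter wraps roughly every 8.6 s, at 1 GHz roughly every 4.3 s. A minimal sketch of a wrap-tolerant elapsed-time calculation for the relative (SELECT_NANOSLEEP) case is shown below; the helper name is hypothetical, and the absolute (SELECT_CLOCK_NANOSLEEP) case would still need a genuine 64-bit counter.

#include <zephyr/kernel.h>

/* Hypothetical wrap-tolerant duration measurement: subtracting two 32-bit
 * cycle samples as uint32_t yields the correct delta even if the counter
 * wrapped once in between, as long as the sleep is shorter than one full
 * wrap period.
 */
static uint64_t elapsed_ns_32(uint32_t start_cyc, uint32_t end_cyc)
{
        uint32_t delta_cyc = end_cyc - start_cyc; /* modular arithmetic absorbs one wrap */

        return k_cyc_to_ns_ceil64(delta_cyc);
}

In the relative case, the `then` and `now` samples from k_cycle_get_32() could be fed through such a helper instead of being widened to 64 bits before subtraction.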
@@ -220,64 +136,3 @@ ZTEST(posix_timers, test_nanosleep_execution)
         /* sleep for 1s + 1us + 1ns */
         common_lower_bound_check(SELECT_NANOSLEEP, 0, 0, 1, 1001);
 }
-
-ZTEST(posix_timers, test_clock_nanosleep_execution)
-{
-        struct timespec ts;
-
-        clock_gettime(CLOCK_MONOTONIC, &ts);
-
-        /* absolute sleeps with the monotonic clock and reference time ts */
-
-        /* until 1s + 1ns past the reference time */
-        common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_MONOTONIC, TIMER_ABSTIME,
-                                 ts.tv_sec + 1, 1);
-
-        /* until 1s + 1us past the reference time */
-        common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_MONOTONIC, TIMER_ABSTIME,
-                                 ts.tv_sec + 1, 1000);
-
-        /* until 1s + 500000000ns past the reference time */
-        common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_MONOTONIC, TIMER_ABSTIME,
-                                 ts.tv_sec + 1, 500000000);
-
-        /* until 2s past the reference time */
-        common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_MONOTONIC, TIMER_ABSTIME,
-                                 ts.tv_sec + 2, 0);
-
-        /* until 2s + 1ns past the reference time */
-        common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_MONOTONIC, TIMER_ABSTIME,
-                                 ts.tv_sec + 2, 1);
-
-        /* until 2s + 1us + 1ns past the reference time */
-        common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_MONOTONIC, TIMER_ABSTIME,
-                                 ts.tv_sec + 2, 1001);
-
-        clock_gettime(CLOCK_REALTIME, &ts);
-
-        /* absolute sleeps with the real-time clock and adjusted reference time ts */
-
-        /* until 1s + 1ns past the reference time */
-        common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_REALTIME, TIMER_ABSTIME,
-                                 ts.tv_sec + 1, 1);
-
-        /* until 1s + 1us past the reference time */
-        common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_REALTIME, TIMER_ABSTIME,
-                                 ts.tv_sec + 1, 1000);
-
-        /* until 1s + 500000000ns past the reference time */
-        common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_REALTIME, TIMER_ABSTIME,
-                                 ts.tv_sec + 1, 500000000);
-
-        /* until 2s past the reference time */
-        common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_REALTIME, TIMER_ABSTIME,
-                                 ts.tv_sec + 2, 0);
-
-        /* until 2s + 1ns past the reference time */
-        common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_REALTIME, TIMER_ABSTIME,
-                                 ts.tv_sec + 2, 1);
-
-        /* until 2s + 1us + 1ns past the reference time */
-        common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_REALTIME, TIMER_ABSTIME,
-                                 ts.tv_sec + 2, 1001);
-}
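For context, the absolute-sleep pattern that this removed test drives through common_lower_bound_check() amounts to building a deadline from clock_gettime() and passing it to clock_nanosleep() with TIMER_ABSTIME. A minimal standalone sketch (the function name is illustrative):

#include <errno.h>
#include <time.h>

/* Sketch: sleep until an absolute deadline on CLOCK_MONOTONIC, retrying on
 * EINTR with the same time-point. This is the pattern the removed test
 * exercises via common_lower_bound_check(..., TIMER_ABSTIME, ...).
 */
static void sleep_until_two_seconds_from_now(void)
{
        struct timespec deadline;

        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_sec += 2; /* deadline = now + 2s */

        /* clock_nanosleep() returns an error number directly; it does not set errno */
        while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline, NULL) == EINTR) {
                /* the deadline is absolute, so retrying with the same value does not drift */
        }
}

Because the deadline is an absolute time-point rather than a duration, an interrupted call can be retried with the same timespec without accumulating error.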