2626#include <linux/kthread.h>
2727#include <linux/tcp.h>
2828#include <linux/topology.h>
29+ #include <linux/nodemask.h>
2930
3031#undef DEBUG
3132#if DBG_CACHE > 0
@@ -234,13 +235,13 @@ enum {
234235};
235236
/*
 * Per-NUMA-node state for cache work scheduling.
 * One entry per online node, allocated in tfw_init_node_cpus().
 */
typedef struct {
	int		*cpu;		/* ids of online CPUs on this node,
					 * filled by tfw_init_node_cpus() */
	atomic_t	cpu_idx;	/* presumably a round-robin cursor into
					 * @cpu — confirm against scheduler
					 * code (usage not visible here) */
	unsigned int	nr_cpus;	/* number of valid entries in @cpu */
	TDB		*db;		/* per-node cache database, opened in
					 * tfw_cache_start() */
} CaNode;

/* Node-CPU map; NULL until tfw_init_node_cpus() allocates it. */
static CaNode *c_nodes;
244245
245246typedef int tfw_cache_write_actor_t (TDB * , TdbVRec * * , TfwHttpResp * , char * * ,
246247 size_t , TfwDecodeCacheIter * );
@@ -333,18 +334,59 @@ tfw_cache_key_node(unsigned long key)
333334}
334335
335336/**
336- * Just choose any CPU for each node to use queue_work_on() for
337- * nodes scheduling. Reserve 0th CPU for other tasks.
337+ * Release node-cpu map.
338338 */
339339static void
340+ tfw_release_node_cpus (void )
341+ {
342+ int node ;
343+
344+ if (!c_nodes )
345+ return ;
346+
347+ for (node = 0 ; node < nr_online_nodes ; node ++ ) {
348+ if (c_nodes [node ].cpu )
349+ kfree (c_nodes [node ].cpu );
350+ }
351+ kfree (c_nodes );
352+ }
353+
354+ /**
355+ * Create node-cpu map to use queue_work_on() for nodes scheduling.
356+ * 0th CPU is reserved for other tasks.
357+ * At the moment we doesn't support CPU hotplug, so enumerate only online CPUs.
358+ */
359+ static int
340360tfw_init_node_cpus (void )
341361{
342- int cpu , node ;
362+ int nr_cpus , cpu , node ;
363+
364+ T_DBG2 ("nr_online_nodes: %d" , nr_online_nodes );
365+
366+ c_nodes = kzalloc (nr_online_nodes * sizeof (CaNode ), GFP_KERNEL );
367+ if (!c_nodes ) {
368+ T_ERR ("Failed to allocate nodes map for cache work scheduler" );
369+ return - ENOMEM ;
370+ }
371+
372+ for_each_node_with_cpus (node ) {
373+ nr_cpus = nr_cpus_node (node );
374+ T_DBG2 ("node: %d nr_cpus: %d" ,node , nr_cpus );
375+ c_nodes [node ].cpu = kmalloc (nr_cpus * sizeof (int ), GFP_KERNEL );
376+ if (!c_nodes [node ].cpu ) {
377+ T_ERR ("Failed to allocate CPU array for node %d for cache work scheduler" ,
378+ node );
379+ return - ENOMEM ;
380+ }
381+ }
343382
344383 for_each_online_cpu (cpu ) {
345384 node = cpu_to_node (cpu );
385+ T_DBG2 ("node: %d cpu: %d" ,node , cpu );
346386 c_nodes [node ].cpu [c_nodes [node ].nr_cpus ++ ] = cpu ;
347387 }
388+
389+ return 0 ;
348390}
349391
350392static TDB *
@@ -976,8 +1018,7 @@ tfw_cache_send_304(TfwHttpReq *req, TfwCacheEntry *ce)
9761018
9771019 resp -> mit .start_off = FRAME_HEADER_SIZE ;
9781020
979- r = tfw_h2_resp_status_write (resp , 304 , false, true,
980- stream_id );
1021+ r = tfw_h2_resp_status_write (resp , 304 , false, true);
9811022 if (unlikely (r ))
9821023 goto err_setup ;
9831024 /* account for :status field itself */
@@ -1018,7 +1059,7 @@ tfw_cache_send_304(TfwHttpReq *req, TfwCacheEntry *ce)
10181059 return ;
10191060 }
10201061
1021- if (tfw_h2_frame_local_resp (resp , stream_id , h_len , NULL ))
1062+ if (tfw_h2_frame_local_resp (resp , h_len , NULL ))
10221063 goto err_setup ;
10231064
10241065 tfw_h2_req_unlink_stream (req );
@@ -2624,7 +2665,7 @@ tfw_cache_add_body_page(TfwMsgIter *it, char *p, int sz, bool h2,
26242665 */
26252666static int
26262667tfw_cache_build_resp_body (TDB * db , TdbVRec * trec , TfwMsgIter * it , char * p ,
2627- unsigned long body_sz , bool h2 , unsigned int stream_id )
2668+ unsigned long body_sz , bool h2 )
26282669{
26292670 int r ;
26302671 bool sh_frag = h2 ? false : true;
@@ -2660,10 +2701,6 @@ tfw_cache_build_resp_body(TDB *db, TdbVRec *trec, TfwMsgIter *it, char *p,
26602701 !body_sz );
26612702 if (r )
26622703 return r ;
2663- if (stream_id ) {
2664- skb_set_tfw_flags (it -> skb , SS_F_HTTT2_FRAME_DATA );
2665- skb_set_tfw_cb (it -> skb , stream_id );
2666- }
26672704 }
26682705 if (!body_sz || !(trec = tdb_next_rec_chunk (db , trec )))
26692706 break ;
@@ -2686,8 +2723,7 @@ tfw_cache_build_resp_body(TDB *db, TdbVRec *trec, TfwMsgIter *it, char *p,
26862723}
26872724
26882725static int
2689- tfw_cache_set_hdr_age (TfwHttpResp * resp , TfwCacheEntry * ce ,
2690- unsigned int stream_id )
2726+ tfw_cache_set_hdr_age (TfwHttpResp * resp , TfwCacheEntry * ce )
26912727{
26922728 int r ;
26932729 size_t digs ;
@@ -2718,8 +2754,7 @@ tfw_cache_set_hdr_age(TfwHttpResp *resp, TfwCacheEntry *ce,
27182754
27192755 if (to_h2 ) {
27202756 h_age .hpack_idx = 21 ;
2721- if ((r = tfw_hpack_encode (resp , & h_age , false, false,
2722- stream_id )))
2757+ if ((r = tfw_hpack_encode (resp , & h_age , false, false)))
27232758 goto err ;
27242759 } else {
27252760 if ((r = tfw_http_msg_expand_data (& mit -> iter , skb_head ,
@@ -2761,8 +2796,7 @@ tfw_cache_set_hdr_age(TfwHttpResp *resp, TfwCacheEntry *ce,
27612796 * TODO use iterator and passed skbs to be called from net_tx_action.
27622797 */
27632798static TfwHttpResp *
2764- tfw_cache_build_resp (TfwHttpReq * req , TfwCacheEntry * ce , long lifetime ,
2765- unsigned int stream_id )
2799+ tfw_cache_build_resp (TfwHttpReq * req , TfwCacheEntry * ce , long lifetime )
27662800{
27672801 int h ;
27682802 TfwStr dummy_body = { 0 };
@@ -2821,14 +2855,14 @@ tfw_cache_build_resp(TfwHttpReq *req, TfwCacheEntry *ce, long lifetime,
28212855 * Set 'set-cookie' header if needed, for HTTP/2 or HTTP/1.1
28222856 * response.
28232857 */
2824- if (tfw_http_sess_resp_process (resp , true, stream_id ))
2858+ if (tfw_http_sess_resp_process (resp , true))
28252859 goto free ;
28262860 /*
28272861 * RFC 7234 p.4 Constructing Responses from Caches:
28282862 * When a stored response is used to satisfy a request without
28292863 * validation, a cache MUST generate an Age header field.
28302864 */
2831- if (tfw_cache_set_hdr_age (resp , ce , stream_id ))
2865+ if (tfw_cache_set_hdr_age (resp , ce ))
28322866 goto free ;
28332867
28342868 if (!TFW_MSG_H2 (req )) {
@@ -2856,11 +2890,11 @@ tfw_cache_build_resp(TfwHttpReq *req, TfwCacheEntry *ce, long lifetime,
28562890 }
28572891
28582892 /* Set additional headers for HTTP/2 response. */
2859- if (tfw_h2_resp_add_loc_hdrs (resp , h_mods , true, stream_id )
2893+ if (tfw_h2_resp_add_loc_hdrs (resp , h_mods , true)
28602894 || (lifetime > ce -> lifetime
2861- && tfw_h2_set_stale_warn (resp , stream_id ))
2895+ && tfw_h2_set_stale_warn (resp ))
28622896 || (!test_bit (TFW_HTTP_B_HDR_DATE , resp -> flags )
2863- && tfw_h2_add_hdr_date (resp , true, stream_id )))
2897+ && tfw_h2_add_hdr_date (resp , true)))
28642898 goto free ;
28652899
28662900 h_len += mit -> acc_len ;
@@ -2881,7 +2915,7 @@ tfw_cache_build_resp(TfwHttpReq *req, TfwCacheEntry *ce, long lifetime,
28812915 * send content in the response.
28822916 */
28832917 dummy_body .len = req -> method != TFW_HTTP_METH_HEAD ? ce -> body_len : 0 ;
2884- if (tfw_h2_frame_local_resp (resp , stream_id , h_len , & dummy_body ))
2918+ if (tfw_h2_frame_local_resp (resp , h_len , & dummy_body ))
28852919 goto free ;
28862920 it -> skb = ss_skb_peek_tail (& it -> skb_head );
28872921 it -> frag = skb_shinfo (it -> skb )-> nr_frags - 1 ;
@@ -2891,7 +2925,7 @@ tfw_cache_build_resp(TfwHttpReq *req, TfwCacheEntry *ce, long lifetime,
28912925 BUG_ON (p != TDB_PTR (db -> hdr , ce -> body ));
28922926 if (ce -> body_len && req -> method != TFW_HTTP_METH_HEAD ) {
28932927 if (tfw_cache_build_resp_body (db , trec , it , p , ce -> body_len ,
2894- TFW_MSG_H2 (req ), stream_id ))
2928+ TFW_MSG_H2 (req )))
28952929 goto free ;
28962930 }
28972931 resp -> content_length = ce -> body_len ;
@@ -2952,8 +2986,7 @@ cache_req_process_node(TfwHttpReq *req, tfw_http_cache_cb_t action)
29522986 }
29532987 }
29542988
2955- resp = tfw_cache_build_resp (req , ce , lifetime , id );
2956-
2989+ resp = tfw_cache_build_resp (req , ce , lifetime );
29572990 /*
29582991 * The stream of HTTP/2-request should be closed here since we have
29592992 * successfully created the resulting response from cache and will
@@ -3160,6 +3193,31 @@ tfw_cache_mgr(void *arg)
31603193}
31613194#endif
31623195
3196+ static inline int
3197+ tfw_cache_wq_init (int cpu )
3198+ {
3199+ TfwWorkTasklet * ct = & per_cpu (cache_wq , cpu );
3200+ int r ;
3201+
3202+ r = tfw_wq_init (& ct -> wq , TFW_DFLT_QSZ , cpu_to_node (cpu ));
3203+ if (unlikely (r ))
3204+ return r ;
3205+ init_irq_work (& ct -> ipi_work , tfw_cache_ipi );
3206+ tasklet_init (& ct -> tasklet , tfw_wq_tasklet , (unsigned long )ct );
3207+
3208+ return 0 ;
3209+ }
3210+
3211+ static inline void
3212+ tfw_cache_wq_clear (int cpu )
3213+ {
3214+ TfwWorkTasklet * ct = & per_cpu (cache_wq , cpu );
3215+
3216+ tasklet_kill (& ct -> tasklet );
3217+ irq_work_sync (& ct -> ipi_work );
3218+ tfw_wq_destroy (& ct -> wq );
3219+ }
3220+
31633221static int
31643222tfw_cache_start (void )
31653223{
@@ -3171,11 +3229,16 @@ tfw_cache_start(void)
31713229 if (!(cache_cfg .cache || g_vhost -> cache_purge ))
31723230 return 0 ;
31733231
3174- for_each_node_with_cpus (i ) {
3232+ if ((r = tfw_init_node_cpus ()))
3233+ goto node_cpus_alloc_err ;
3234+
3235+ for (i = 0 ; i < nr_online_nodes ; i ++ ) {
31753236 c_nodes [i ].db = tdb_open (cache_cfg .db_path ,
31763237 cache_cfg .db_size , 0 , i );
3177- if (!c_nodes [i ].db )
3238+ if (!c_nodes [i ].db ) {
3239+ r = - ENOMEM ;
31783240 goto close_db ;
3241+ }
31793242 }
31803243#if 0
31813244 cache_mgr_thr = kthread_run (tfw_cache_mgr , NULL , "tfw_cache_mgr" );
@@ -3185,19 +3248,14 @@ tfw_cache_start(void)
31853248 goto close_db ;
31863249 }
31873250#endif
3188- tfw_init_node_cpus ();
31893251
31903252 TFW_WQ_CHECKSZ (TfwCWork );
31913253 for_each_online_cpu (i ) {
3192- TfwWorkTasklet * ct = & per_cpu (cache_wq , i );
3193- r = tfw_wq_init (& ct -> wq , TFW_DFLT_QSZ , cpu_to_node (i ));
3194- if (r ) {
3195- T_ERR_NL ("%s: Can't initialize cache work queue for CPU #%d\n" ,
3196- __func__ , i );
3197- goto close_db ;
3254+ if (unlikely (r = tfw_cache_wq_init (i ))) {
3255+ T_ERR_NL ("%s: Can't initialize cache work"
3256+ " queue for CPU #%d\n" , __func__ , i );
3257+ goto free_tasklet ;
31983258 }
3199- init_irq_work (& ct -> ipi_work , tfw_cache_ipi );
3200- tasklet_init (& ct -> tasklet , tfw_wq_tasklet , (unsigned long )ct );
32013259 }
32023260
32033261#if defined(DEBUG )
@@ -3219,9 +3277,15 @@ tfw_cache_start(void)
32193277 for_each_online_cpu (i )
32203278 kfree (per_cpu (ce_dbg_buf , i ));
32213279#endif
3280+ free_tasklet :
3281+ for_each_online_cpu (i )
3282+ tfw_cache_wq_clear (i );
32223283close_db :
32233284 for_each_node_with_cpus (i )
32243285 tdb_close (c_nodes [i ].db );
3286+
3287+ node_cpus_alloc_err :
3288+ tfw_release_node_cpus ();
32253289 return r ;
32263290}
32273291
@@ -3235,12 +3299,8 @@ tfw_cache_stop(void)
32353299 if (!cache_cfg .cache )
32363300 return ;
32373301
3238- for_each_online_cpu (i ) {
3239- TfwWorkTasklet * ct = & per_cpu (cache_wq , i );
3240- tasklet_kill (& ct -> tasklet );
3241- irq_work_sync (& ct -> ipi_work );
3242- tfw_wq_destroy (& ct -> wq );
3243- }
3302+ for_each_online_cpu (i )
3303+ tfw_cache_wq_clear (i );
32443304#if 0
32453305 kthread_stop (cache_mgr_thr );
32463306#endif
@@ -3252,6 +3312,8 @@ tfw_cache_stop(void)
32523312
32533313 for_each_node_with_cpus (i )
32543314 tdb_close (c_nodes [i ].db );
3315+
3316+ tfw_release_node_cpus ();
32553317}
32563318
32573319static const TfwCfgEnum cache_http_methods_enum [] = {
0 commit comments