@@ -568,6 +568,14 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
568568 .truesize = rx_ring -> truesize ,
569569 .count = rx_ring -> count ,
570570 };
571+
572+ const struct libeth_fq_fp hdr_fq = {
573+ .pp = rx_ring -> hdr_pp ,
574+ .fqes = rx_ring -> hdr_fqes ,
575+ .truesize = rx_ring -> hdr_truesize ,
576+ .count = rx_ring -> count ,
577+ };
578+
571579 u16 ntu = rx_ring -> next_to_use ;
572580
573581 /* nothing to do or no valid netdev defined */
@@ -584,6 +592,14 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
584592
585593 rx_desc -> read .pkt_addr = cpu_to_le64 (addr );
586594
595+ if (!hdr_fq .pp )
596+ goto next ;
597+
598+ addr = libeth_rx_alloc (& hdr_fq , ntu );
599+ if (unlikely (addr == DMA_MAPPING_ERROR ))
600+ return ;
601+
602+ next :
587603 rx_desc ++ ;
588604 ntu ++ ;
589605 if (unlikely (ntu == fq .count )) {
@@ -781,6 +797,31 @@ static int ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
781797 return result ;
782798}
783799
/* Header-split workaround for HW that cannot split headers itself:
 * copy the packet headers out of the payload buffer into the dedicated
 * header buffer, so the rest of the Rx path can treat the frame as if
 * real header split had occurred.
 *
 * @hdr:      header fqe to copy the headers into
 * @buf:      payload fqe the descriptor was written to
 * @data_len: length of the received data
 *
 * Returns the number of bytes copied into @hdr (and consumed from the
 * front of @buf), or 0 when no copy was possible.
 */
static u32 ixgbevf_rx_hsplit_wa(const struct libeth_fqe *hdr,
				struct libeth_fqe *buf, u32 data_len)
{
	/* Short frames are copied whole; otherwise only the Ethernet
	 * header is pulled into the header buffer.
	 */
	u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN;
	struct page *hdr_page, *buf_page;
	const void *src;
	void *dst;

	/* net_iov-backed (unreadable) memory cannot be touched by the CPU,
	 * and the payload must be synced for CPU access before reading it.
	 */
	if (unlikely(netmem_is_net_iov(buf->netmem)) ||
	    !libeth_rx_sync_for_cpu(buf, copy))
		return 0;

	/* Both netmems are known to be page-backed here (net_iov was
	 * rejected above), so the unchecked conversion is safe.
	 */
	hdr_page = __netmem_to_page(hdr->netmem);
	buf_page = __netmem_to_page(buf->netmem);
	/* Each address = page VA + fqe offset + page_pool headroom. */
	dst = page_address(hdr_page) + hdr->offset +
	      pp_page_to_nmdesc(hdr_page)->pp->p.offset;
	src = page_address(buf_page) + buf->offset +
	      pp_page_to_nmdesc(buf_page)->pp->p.offset;

	/* Intentionally copies up to the next LARGEST_ALIGN boundary
	 * (more than @copy bytes) for a faster aligned memcpy; presumably
	 * both buffers have enough room for the over-read/over-write —
	 * NOTE(review): confirm against the fqe truesize guarantees.
	 */
	memcpy(dst, src, LARGEST_ALIGN(copy));
	/* Advance the payload past the bytes now owned by the header fqe. */
	buf->offset += copy;

	return copy;
}
824+
784825static int ixgbevf_clean_rx_irq (struct ixgbevf_q_vector * q_vector ,
785826 struct ixgbevf_ring * rx_ring ,
786827 int budget )
@@ -798,6 +839,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
798839 while (likely (total_rx_packets < budget )) {
799840 union ixgbe_adv_rx_desc * rx_desc ;
800841 struct libeth_fqe * rx_buffer ;
842+ struct libeth_fqe * hdr_buff ;
843+ unsigned int hdr_size = 0 ;
801844 unsigned int size ;
802845
803846 /* return some buffers to hardware, one at a time is too slow */
@@ -818,6 +861,19 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
818861 rmb ();
819862
820863 rx_buffer = & rx_ring -> rx_fqes [rx_ring -> next_to_clean ];
864+
865+ if (!rx_ring -> hdr_pp )
866+ goto payload ;
867+
868+ hdr_buff = & rx_ring -> hdr_fqes [rx_ring -> next_to_clean ];
869+
870+ if (unlikely (!xdp -> data ))
871+ hdr_size = ixgbevf_rx_hsplit_wa (hdr_buff , rx_buffer ,
872+ size );
873+
874+ libeth_xdp_process_buff (xdp , hdr_buff , hdr_size );
875+
876+ payload :
821877 libeth_xdp_process_buff (xdp , rx_buffer , size );
822878
823879 cleaned_count ++ ;
@@ -3054,19 +3110,38 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
30543110 return err ;
30553111}
30563112
3057- /**
3058- * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
3059- * @adapter: board private structure
3060- * @rx_ring: Rx descriptor ring (for a specific queue) to setup
3061- *
3062- * Returns 0 on success, negative on failure
3063- **/
3064- int ixgbevf_setup_rx_resources (struct ixgbevf_adapter * adapter ,
3065- struct ixgbevf_ring * rx_ring )
3113+ static void ixgbvf_destroy_rx_pp (struct ixgbevf_ring * rx_ring )
3114+ {
3115+ struct libeth_fq fq = {
3116+ .pp = rx_ring -> pp ,
3117+ .fqes = rx_ring -> rx_fqes ,
3118+ };
3119+
3120+ libeth_rx_fq_destroy (& fq );
3121+ rx_ring -> rx_fqes = NULL ;
3122+ rx_ring -> pp = NULL ;
3123+
3124+ if (!rx_ring -> hdr_pp )
3125+ return ;
3126+
3127+ fq = (struct libeth_fq ) {
3128+ .pp = rx_ring -> hdr_pp ,
3129+ .fqes = rx_ring -> hdr_fqes ,
3130+ };
3131+
3132+ libeth_rx_fq_destroy (& fq );
3133+ rx_ring -> hdr_fqes = NULL ;
3134+ rx_ring -> hdr_pp = NULL ;
3135+ }
3136+
3137+ static int ixgbvf_create_rx_pp (struct ixgbevf_ring * rx_ring )
30663138{
3139+ u32 adapter_flags = rx_ring -> q_vector -> adapter -> flags ;
3140+
30673141 struct libeth_fq fq = {
30683142 .count = rx_ring -> count ,
30693143 .nid = NUMA_NO_NODE ,
3144+ .hsplit = adapter_flags & IXGBEVF_FLAG_PSEUDO_HSPLIT ,
30703145 .type = LIBETH_FQE_MTU ,
30713146 .xdp = !!rx_ring -> xdp_prog ,
30723147 .buf_len = IXGBEVF_RX_PAGE_LEN (rx_ring -> xdp_prog ?
@@ -3084,34 +3159,75 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
30843159 rx_ring -> truesize = fq .truesize ;
30853160 rx_ring -> rx_buf_len = fq .buf_len ;
30863161
3162+ if (!fq .hsplit )
3163+ return 0 ;
3164+
3165+ fq = (struct libeth_fq ) {
3166+ .count = rx_ring -> count ,
3167+ .nid = NUMA_NO_NODE ,
3168+ .type = LIBETH_FQE_HDR ,
3169+ .xdp = !!rx_ring -> xdp_prog ,
3170+ };
3171+
3172+ ret = libeth_rx_fq_create (& fq , & rx_ring -> q_vector -> napi );
3173+ if (ret )
3174+ goto err ;
3175+
3176+ rx_ring -> hdr_pp = fq .pp ;
3177+ rx_ring -> hdr_fqes = fq .fqes ;
3178+ rx_ring -> hdr_truesize = fq .truesize ;
3179+ rx_ring -> hdr_buf_len = fq .buf_len ;
3180+
3181+ return 0 ;
3182+
3183+ err :
3184+ ixgbvf_destroy_rx_pp (rx_ring );
3185+ return ret ;
3186+ }
3187+
3188+ /**
3189+ * ixgbevf_setup_rx_resources - allocate Rx resources
3190+ * @adapter: board private structure
3191+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
3192+ *
3193+ * Returns: 0 on success, negative on failure.
3194+ **/
3195+ int ixgbevf_setup_rx_resources (struct ixgbevf_adapter * adapter ,
3196+ struct ixgbevf_ring * rx_ring )
3197+ {
3198+ int ret ;
3199+
3200+ ret = ixgbvf_create_rx_pp (rx_ring );
3201+ if (ret )
3202+ return ret ;
3203+
30873204 u64_stats_init (& rx_ring -> syncp );
30883205
30893206 /* Round up to nearest 4K */
30903207 rx_ring -> size = rx_ring -> count * sizeof (union ixgbe_adv_rx_desc );
30913208 rx_ring -> size = ALIGN (rx_ring -> size , 4096 );
30923209
3093- rx_ring -> desc = dma_alloc_coherent (fq . pp -> p .dev , rx_ring -> size ,
3210+ rx_ring -> desc = dma_alloc_coherent (rx_ring -> pp -> p .dev , rx_ring -> size ,
30943211 & rx_ring -> dma , GFP_KERNEL );
30953212
30963213 if (!rx_ring -> desc )
30973214 goto err ;
30983215
30993216 /* XDP RX-queue info */
31003217 ret = __xdp_rxq_info_reg (& rx_ring -> xdp_rxq , adapter -> netdev ,
3101- rx_ring -> queue_index , 0 , fq . buf_len );
3218+ rx_ring -> queue_index , 0 , rx_ring -> rx_buf_len );
31023219 if (ret )
31033220 goto err ;
31043221
3105- xdp_rxq_info_attach_page_pool (& rx_ring -> xdp_rxq , fq . pp );
3222+ xdp_rxq_info_attach_page_pool (& rx_ring -> xdp_rxq , rx_ring -> pp );
31063223
31073224 rx_ring -> xdp_prog = adapter -> xdp_prog ;
31083225
31093226 return 0 ;
31103227err :
3111- libeth_rx_fq_destroy (& fq );
3112- rx_ring -> rx_fqes = NULL ;
3113- rx_ring -> pp = NULL ;
3228+ ixgbvf_destroy_rx_pp (rx_ring );
31143229 dev_err (rx_ring -> dev , "Unable to allocate memory for the Rx descriptor ring\n" );
3230+
31153231 return ret ;
31163232}
31173233
@@ -4222,6 +4338,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
42224338 netdev -> priv_flags |= IFF_UNICAST_FLT ;
42234339 netdev -> xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_RX_SG ;
42244340
4341+ if (adapter -> hw .mac .type == ixgbe_mac_82599_vf )
4342+ adapter -> flags |= IXGBEVF_FLAG_PSEUDO_HSPLIT ;
4343+
42254344 /* MTU range: 68 - 1504 or 9710 */
42264345 netdev -> min_mtu = ETH_MIN_MTU ;
42274346 switch (adapter -> hw .api_version ) {
0 commit comments