@@ -738,6 +738,7 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev)
 {
 	struct gdma_context *gc = mdev_to_gc(mdev);
 	struct gdma_queue_spec spec = {};
+	struct gdma_irq_context *gic;
 	int err, i;
 
 	spec.type = GDMA_EQ;
@@ -748,6 +749,8 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev)
 	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
 	spec.eq.msix_index = 0;
 
+	gic = gdma_get_gic(gc, false, 0, 0, &spec.eq.msix_index);
+
 	err = mana_gd_create_mana_eq(&gc->mana_ib, &spec, &mdev->fatal_err_eq);
 	if (err)
 		return err;
@@ -761,6 +764,9 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev)
 	spec.eq.callback = NULL;
 	for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) {
 		spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
+
+		gic = gdma_get_gic(gc, false, 0, 0, &spec.eq.msix_index);
+
 		err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->eqs[i]);
 		if (err)
 			goto destroy_eqs;
@@ -780,12 +786,16 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev)
 void mana_ib_destroy_eqs(struct mana_ib_dev *mdev)
 {
 	struct gdma_context *gc = mdev_to_gc(mdev);
-	int i;
+	int i, msi;
 
 	mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
+	gdma_put_gic(gc, false, 0);
 
-	for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++)
+	for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) {
 		mana_gd_destroy_queue(gc, mdev->eqs[i]);
+		msi = (i + 1) % gc->num_msix_usable;
+		gdma_put_gic(gc, false, msi);
+	}
 
 	kfree(mdev->eqs);
 }
0 commit comments