octeontx2-af: add mbox to return CPT_AF_FLT_INT info
author Srujana Challa <schalla@marvell.com>
Wed, 18 Jan 2023 12:03:54 +0000 (17:33 +0530)
committer Sasha Levin <sashal@kernel.org>
Tue, 26 Mar 2024 22:21:01 +0000 (18:21 -0400)
[ Upstream commit 8299ffe3dc3dc9ac2bd60e3a8332008f03156aca ]

CPT HW triggers the CPT AF FLT interrupt when a CPT engine
hits an uncorrectable error; AF receives the interrupt and
recovers the affected engines.
This patch adds a mailbox for CPT VFs to request info about
the CPT faulted and recovered engines.

Signed-off-by: Srujana Challa <schalla@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stable-dep-of: a88e0f936ba9 ("octeontx2: Detect the mbox up or down message via register")
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
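
For illustration only (not part of the commit): a minimal sketch of how a
requester could use the new CPT_FLT_ENG_INFO message, assuming the generic
otx2 mailbox helpers declared in af/mbox.h (otx2_mbox_alloc_msg_rsp,
otx2_mbox_msg_send, otx2_mbox_wait_for_rsp, otx2_mbox_get_rsp). The helper
name cptvf_get_flt_eng_info() and the use of mailbox channel/devid 0 are
assumptions made for the sketch, not taken from this patch.

/* Sketch: ask the AF for the faulted/recovered CPT engine bitmaps via the
 * new CPT_FLT_ENG_INFO mailbox message.  The function name and devid 0 are
 * illustrative assumptions.
 */
static int cptvf_get_flt_eng_info(struct otx2_mbox *mbox, int blkaddr,
                                  bool reset, u64 *flt_map, u64 *rcvrd_map)
{
        struct cpt_flt_eng_info_req *req;
        struct cpt_flt_eng_info_rsp *rsp;
        int ret, vec;

        req = (struct cpt_flt_eng_info_req *)
              otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req), sizeof(*rsp));
        if (!req)
                return -ENOMEM;

        req->hdr.id = MBOX_MSG_CPT_FLT_ENG_INFO;
        req->hdr.sig = OTX2_MBOX_REQ_SIG;
        req->blkaddr = blkaddr;
        req->reset = reset;     /* true: AF clears its maps after copying them */

        otx2_mbox_msg_send(mbox, 0);
        ret = otx2_mbox_wait_for_rsp(mbox, 0);
        if (ret)
                return ret;

        rsp = (struct cpt_flt_eng_info_rsp *)
              otx2_mbox_get_rsp(mbox, 0, &req->hdr);
        if (IS_ERR(rsp))
                return PTR_ERR(rsp);
        if (rsp->hdr.rc)
                return rsp->hdr.rc;

        /* One u64 bitmap per CPT AF FLT interrupt vector; bit i set means
         * the corresponding engine faulted (flt) or was recovered (rcvrd).
         */
        for (vec = 0; vec < CPT_10K_AF_INT_VEC_RVU; vec++) {
                flt_map[vec] = rsp->flt_eng_map[vec];
                rcvrd_map[vec] = rsp->rcvrd_eng_map[vec];
        }
        return 0;
}

On the AF side (see the rvu_cpt.c hunks below), the FLT interrupt handler
records faulted/recovered engines per vector under cpt_intr_lock, and the
mailbox handler copies those maps into the response, clearing them when
req->reset is set.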

drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 5decd1919de039de0877d191265cdbe73fec886e..bbb6658420f1df29d926903645069e83466784fc 100644
@@ -197,6 +197,8 @@ M(CPT_RXC_TIME_CFG,     0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req,  \
                               msg_rsp)                                 \
 M(CPT_CTX_CACHE_SYNC,   0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp)    \
 M(CPT_LF_RESET,         0xA08, cpt_lf_reset, cpt_lf_rst_req, msg_rsp)  \
+M(CPT_FLT_ENG_INFO,     0xA09, cpt_flt_eng_info, cpt_flt_eng_info_req, \
+                              cpt_flt_eng_info_rsp)                    \
 /* SDP mbox IDs (range 0x1000 - 0x11FF) */                             \
 M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
 M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
@@ -1710,6 +1712,21 @@ struct cpt_lf_rst_req {
        u32 rsvd;
 };
 
+/* Mailbox message format to request for CPT faulted engines */
+struct cpt_flt_eng_info_req {
+       struct mbox_msghdr hdr;
+       int blkaddr;
+       bool reset;
+       u32 rsvd;
+};
+
+struct cpt_flt_eng_info_rsp {
+       struct mbox_msghdr hdr;
+       u64 flt_eng_map[CPT_10K_AF_INT_VEC_RVU];
+       u64 rcvrd_eng_map[CPT_10K_AF_INT_VEC_RVU];
+       u64 rsvd;
+};
+
 struct sdp_node_info {
        /* Node to which this PF belongs */
        u8 node_id;
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index e1760f9298b179e92e4e2cfc66f83b5dd0e6a8e4..6a39006c334d7718b87a74aad2fd63cd1dd732c7 100644
@@ -109,6 +109,8 @@ struct rvu_block {
        u64  lfreset_reg;
        unsigned char name[NAME_SIZE];
        struct rvu *rvu;
+       u64 cpt_flt_eng_map[3];
+       u64 cpt_rcvrd_eng_map[3];
 };
 
 struct nix_mcast {
@@ -521,6 +523,8 @@ struct rvu {
        struct list_head        mcs_intrq_head;
        /* mcs interrupt queue lock */
        spinlock_t              mcs_intrq_lock;
+       /* CPT interrupt lock */
+       spinlock_t              cpt_intr_lock;
 };
 
 static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 923af460db2963c46a603d364f8c095a37466039..6fb02b93c17185499f80b7b91e7593e51360e247 100644
@@ -70,6 +70,14 @@ static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr)
 
                rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), grp);
                rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val | 1ULL);
+
+               spin_lock(&rvu->cpt_intr_lock);
+               block->cpt_flt_eng_map[vec] |= BIT_ULL(i);
+               val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(eng));
+               val = val & 0x3;
+               if (val == 0x1 || val == 0x2)
+                       block->cpt_rcvrd_eng_map[vec] |= BIT_ULL(i);
+               spin_unlock(&rvu->cpt_intr_lock);
        }
        rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(vec), reg);
 
@@ -884,6 +892,31 @@ int rvu_mbox_handler_cpt_lf_reset(struct rvu *rvu, struct cpt_lf_rst_req *req,
        return 0;
 }
 
+int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_req *req,
+                                     struct cpt_flt_eng_info_rsp *rsp)
+{
+       struct rvu_block *block;
+       unsigned long flags;
+       int blkaddr, vec;
+
+       blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+       if (blkaddr < 0)
+               return blkaddr;
+
+       block = &rvu->hw->block[blkaddr];
+       for (vec = 0; vec < CPT_10K_AF_INT_VEC_RVU; vec++) {
+               spin_lock_irqsave(&rvu->cpt_intr_lock, flags);
+               rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec];
+               rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec];
+               if (req->reset) {
+                       block->cpt_flt_eng_map[vec] = 0x0;
+                       block->cpt_rcvrd_eng_map[vec] = 0x0;
+               }
+               spin_unlock_irqrestore(&rvu->cpt_intr_lock, flags);
+       }
+       return 0;
+}
+
 static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
 {
        struct cpt_rxc_time_cfg_req req;
@@ -1172,5 +1205,7 @@ int rvu_cpt_init(struct rvu *rvu)
 {
        /* Retrieve CPT PF number */
        rvu->cpt_pf_num = get_cpt_pf_num(rvu);
+       spin_lock_init(&rvu->cpt_intr_lock);
+
        return 0;
 }