@@ -1357,34 +1357,22 @@ int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
}

/**
- * ice_copy_rxq_ctx_to_hw
+ * ice_copy_rxq_ctx_to_hw - Copy packed Rx queue context to HW registers
* @hw: pointer to the hardware structure
- * @ice_rxq_ctx: pointer to the rxq context
+ * @rxq_ctx: pointer to the packed Rx queue context
* @rxq_index: the index of the Rx queue
- *
- * Copies rxq context from dense structure to HW register space
*/
-static int
-ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
+static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *rxq_ctx,
+ u32 rxq_index)
{
- u8 i;
-
- if (!ice_rxq_ctx)
- return -EINVAL;
-
- if (rxq_index > QRX_CTRL_MAX_INDEX)
- return -EINVAL;
-
/* Copy each dword separately to HW */
- for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
- wr32(hw, QRX_CONTEXT(i, rxq_index),
- *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
+ for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
+ u32 ctx = ((u32 *)rxq_ctx)[i];

- ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
- *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
+ wr32(hw, QRX_CONTEXT(i, rxq_index), ctx);
+
+ ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, ctx);
}
-
- return 0;
}

/**
@@ -1497,23 +1485,27 @@ const struct ice_ctx_ele ice_rlan_ctx_info[] = {
/**
* ice_write_rxq_ctx - Write Rx Queue context to hardware
* @hw: pointer to the hardware structure
- * @rlan_ctx: pointer to the rxq context
+ * @rlan_ctx: pointer to the unpacked Rx queue context
* @rxq_index: the index of the Rx queue
*
* Pack the sparse Rx Queue context into dense hardware format and write it
* into the HW register space.
+ *
+ * Return: 0 on success, or -EINVAL if the Rx queue index is invalid.
*/
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
- u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
+ u8 ctx_buf[ICE_RXQ_CTX_SZ] = {};

- if (!rlan_ctx)
+ if (rxq_index > QRX_CTRL_MAX_INDEX)
return -EINVAL;

ice_pack_rxq_ctx(rlan_ctx, ctx_buf);

- return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
+ ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
+
+ return 0;
}

/* LAN Tx Queue Context */
The ice_copy_rxq_ctx_to_hw() and ice_write_rxq_ctx() functions perform some
defensive checks which are typically frowned upon by kernel style guidelines.

In particular, they perform NULL checks on buffers which are never expected
to be NULL. Both functions are only called once and always have valid buffers
pointing to stack memory. These checks only serve to hide potential
programming errors, as we will not produce the normal crash dump on a NULL
access.

In addition, ice_copy_rxq_ctx_to_hw() cannot fail in any other way, so it can
be made void.

Future support for VF Live Migration will need to introduce an inverse
function that reads the Rx queue context from HW registers and unpacks it, as
well as functions to pack and unpack Tx queue context from HW. Rather than
copying these style issues into the new functions, let's first clean up the
existing code.

For the ice_copy_rxq_ctx_to_hw() function:

* Remove the NULL parameter check.
* Move the Rx queue index check out of this function.
* Convert the function to a void return.
* Use a simple int variable instead of a u8 for the for loop index.
* Use a local variable and array syntax to access the rxq_ctx.
* Update the function description to better align with kernel doc style.

For the ice_write_rxq_ctx() function:

* Use the more common '= {}' syntax for initializing the context buffer.
* Move the Rx queue index check into this function.
* Update the function description with a Returns: to align with kernel doc
  style.

These changes align the existing write functions with current kernel style,
and will align with the style of the new functions added when we implement
live migration in a future series.

Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
---
 drivers/net/ethernet/intel/ice/ice_common.c | 42 ++++++++++++-----------------
 1 file changed, 17 insertions(+), 25 deletions(-)
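
Note for reviewers: as a rough illustration of the inverse helper mentioned
above, the read path could end up looking something like the sketch below.
The function name ice_copy_rxq_ctx_from_hw() and its exact shape are
assumptions for illustration only; the real helper would be added by the
live migration series, not by this patch.

/* Illustrative sketch only -- not part of this patch. A hypothetical
 * read-side counterpart to ice_copy_rxq_ctx_to_hw(), mirroring its style.
 */
static void ice_copy_rxq_ctx_from_hw(struct ice_hw *hw, u8 *rxq_ctx,
				      u32 rxq_index)
{
	/* Read each dword of the packed context back from HW */
	for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		u32 *ctx = &((u32 *)rxq_ctx)[i];

		*ctx = rd32(hw, QRX_CONTEXT(i, rxq_index));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *ctx);
	}
}

A caller analogous to ice_write_rxq_ctx() could then validate rxq_index, read
the packed context into a stack buffer with this helper, and unpack it back
into a struct ice_rlan_ctx.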