From patchwork Tue Sep 12 11:59:37 2023
X-Patchwork-Submitter: Przemek Kitszel
X-Patchwork-Id: 13381545
X-Patchwork-Delegate: kuba@kernel.org
From: Przemek Kitszel
To: netdev@vger.kernel.org, Jakub Kicinski, davem@davemloft.net,
    edumazet@google.com, pabeni@redhat.com
Cc: Kees Cook, Jacob Keller, intel-wired-lan@lists.osuosl.org,
    Alexander Lobakin, linux-hardening@vger.kernel.org, Steven Zou,
    Anthony Nguyen, David Laight, Przemek Kitszel
Subject: [PATCH net-next v5 7/7] ice: make use of DEFINE_FLEX() in ice_switch.c
Date: Tue, 12 Sep 2023 07:59:37 -0400
Message-Id: <20230912115937.1645707-8-przemyslaw.kitszel@intel.com>
X-Mailer: git-send-email 2.40.1
In-Reply-To: <20230912115937.1645707-1-przemyslaw.kitszel@intel.com>
References: <20230912115937.1645707-1-przemyslaw.kitszel@intel.com>
X-Mailing-List: netdev@vger.kernel.org

Use the DEFINE_FLEX() macro for 1-element flex array members of ice_switch.c.

Signed-off-by: Przemek Kitszel
---
add/remove: 2/2 grow/shrink: 3/6 up/down: 489/-470 (19)
---
 drivers/net/ethernet/intel/ice/ice_switch.c | 63 +++++----------
 1 file changed, 14 insertions(+), 49 deletions(-)

(A short illustrative sketch of this on-stack pattern follows the diff.)

diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 2f77b684ff76..ee19f3aa3d19 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1812,15 +1812,11 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
 			   enum ice_sw_lkup_type lkup_type,
 			   enum ice_adminq_opc opc)
 {
-	struct ice_aqc_alloc_free_res_elem *sw_buf;
+	DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+	u16 buf_len = __struct_size(sw_buf);
 	struct ice_aqc_res_elem *vsi_ele;
-	u16 buf_len;
 	int status;
 
-	buf_len = struct_size(sw_buf, elem, 1);
-	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
-	if (!sw_buf)
-		return -ENOMEM;
 	sw_buf->num_elems = cpu_to_le16(1);
 
 	if (lkup_type == ICE_SW_LKUP_MAC ||
@@ -1840,25 +1836,22 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
 		sw_buf->res_type =
 			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
 	} else {
-		status = -EINVAL;
-		goto ice_aq_alloc_free_vsi_list_exit;
+		return -EINVAL;
 	}
 
 	if (opc == ice_aqc_opc_free_res)
 		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
 
 	status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, opc);
 	if (status)
-		goto ice_aq_alloc_free_vsi_list_exit;
+		return status;
 
 	if (opc == ice_aqc_opc_alloc_res) {
 		vsi_ele = &sw_buf->elem[0];
 		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
 	}
 
-ice_aq_alloc_free_vsi_list_exit:
-	devm_kfree(ice_hw_to_dev(hw), sw_buf);
-	return status;
+	return 0;
 }
 
 /**
@@ -2088,24 +2081,18 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
  */
 int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
 {
-	struct ice_aqc_alloc_free_res_elem *sw_buf;
-	u16 buf_len;
+	DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+	u16 buf_len = __struct_size(sw_buf);
 	int status;
 
-	buf_len = struct_size(sw_buf, elem, 1);
-	sw_buf = kzalloc(buf_len, GFP_KERNEL);
-	if (!sw_buf)
-		return -ENOMEM;
-
 	sw_buf->num_elems = cpu_to_le16(1);
 	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
 					ICE_AQC_RES_TYPE_S) |
 					ICE_AQC_RES_TYPE_FLAG_SHARED);
 	status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
 				       ice_aqc_opc_alloc_res);
 	if (!status)
 		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
-	kfree(sw_buf);
 
 	return status;
 }
@@ -4434,28 +4421,19 @@ int
 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
 		   u16 *counter_id)
 {
-	struct ice_aqc_alloc_free_res_elem *buf;
-	u16 buf_len;
+	DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+	u16 buf_len = __struct_size(buf);
 	int status;
 
-	/* Allocate resource */
-	buf_len = struct_size(buf, elem, 1);
-	buf = kzalloc(buf_len, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
 	buf->num_elems = cpu_to_le16(num_items);
 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
 				     ICE_AQC_RES_TYPE_M) | alloc_shared);
 
 	status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
 	if (status)
-		goto exit;
+		return status;
 
 	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
-
-exit:
-	kfree(buf);
 	return status;
 }
 
@@ -4471,26 +4449,19 @@ int
 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
 		  u16 counter_id)
 {
-	struct ice_aqc_alloc_free_res_elem *buf;
-	u16 buf_len;
+	DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+	u16 buf_len = __struct_size(buf);
 	int status;
 
-	/* Free resource */
-	buf_len = struct_size(buf, elem, 1);
-	buf = kzalloc(buf_len, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
 	buf->num_elems = cpu_to_le16(num_items);
 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
 				     ICE_AQC_RES_TYPE_M) | alloc_shared);
 	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
 
 	status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res);
 	if (status)
 		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
 
-	kfree(buf);
 	return status;
 }
 
@@ -4508,15 +4479,10 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
  */
 int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
 {
-	struct ice_aqc_alloc_free_res_elem *buf;
-	u16 buf_len;
+	DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+	u16 buf_len = __struct_size(buf);
 	int status;
 
-	buf_len = struct_size(buf, elem, 1);
-	buf = kzalloc(buf_len, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
 	buf->num_elems = cpu_to_le16(1);
 	if (shared)
 		buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
@@ -4534,7 +4500,6 @@ int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
 		ice_debug(hw, ICE_DBG_SW, "Could not set resource type %u id %u to %s\n",
 			  type, res_id, shared ? "SHARED" : "DEDICATED");
 
-	kfree(buf);
 	return status;
 }
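
---

Editorial note, for readers seeing this pattern for the first time: the conversion above
replaces a heap allocation sized with struct_size() (kzalloc()/devm_kzalloc() plus the
matching kfree()/devm_kfree() and error unwinding) with an on-stack object declared via
DEFINE_FLEX() and sized via __struct_size(). The snippet below is a minimal, self-contained
userspace sketch of that idea only. SIMPLE_DEFINE_FLEX(), struct alloc_free_res_elem and
struct res_elem are simplified stand-ins made up for this illustration; they are not the
kernel's <linux/overflow.h> implementation, which among other things zero-initializes the
object and expects a compile-time-constant element count.

/*
 * Minimal sketch of the "on-stack flexible array" pattern used above.
 * SIMPLE_DEFINE_FLEX() is a simplified stand-in written only for this
 * example; it is not the kernel's DEFINE_FLEX() from <linux/overflow.h>.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct res_elem {			/* stand-in for struct ice_aqc_res_elem */
	uint16_t sw_resp;
};

struct alloc_free_res_elem {		/* stand-in for struct ice_aqc_alloc_free_res_elem */
	uint16_t num_elems;
	uint16_t res_type;
	struct res_elem elem[];		/* flexible array member */
};

/*
 * Declare a zero-initialized on-stack buffer large enough for 'type' plus
 * 'count' trailing 'member' entries, and a typed pointer to it.
 */
#define SIMPLE_DEFINE_FLEX(type, name, member, count)			\
	union {								\
		unsigned char bytes[sizeof(type) +			\
			(count) * sizeof(((type *)0)->member[0])];	\
		type obj;						\
	} name##_storage = { { 0 } };					\
	type *name = &name##_storage.obj

int main(void)
{
	/*
	 * Before the patch: sw_buf = kzalloc(struct_size(sw_buf, elem, 1), ...),
	 * with -ENOMEM handling and a kfree() on every exit path.
	 * After the patch: the one-element buffer lives on the stack.
	 */
	SIMPLE_DEFINE_FLEX(struct alloc_free_res_elem, sw_buf, elem, 1);
	size_t buf_len = sizeof(sw_buf_storage);	/* kernel: __struct_size(sw_buf) */

	sw_buf->num_elems = 1;
	sw_buf->elem[0].sw_resp = 0x1234;

	printf("buf_len=%zu num_elems=%u sw_resp=0x%x\n",
	       buf_len, (unsigned)sw_buf->num_elems,
	       (unsigned)sw_buf->elem[0].sw_resp);

	return 0;	/* no kfree()/free(): the buffer goes away with the stack frame */
}

The payoff, reflected in the diffstat, is that the -ENOMEM branches, the goto labels and
the kfree()/devm_kfree() calls all disappear: each buffer here always holds exactly one
element, is small, and never outlives the function, so a small on-stack object is a
natural fit.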