
[iwlwifi-next] iwlwifi: de-const properly where needed

Message ID alpine.BSF.2.00.2102261652510.72241@ai.fobar.qr (mailing list archive)
State Superseded
Delegated to: Luca Coelho
Series [iwlwifi-next] iwlwifi: de-const properly where needed

Commit Message

Bjoern A. Zeeb Feb. 26, 2021, 4:53 p.m. UTC
In order to de-const variables, simply casting through (void *) is
not enough: "cast from 'const .. *' to 'void *' drops const qualifier".
Cast through (uintptr_t) as well [1] to make this compile on systems
with stricter requirements.
In addition, passing const void *data to dma_map_single() also
drops the (const) qualifier.  De-constify the variable on assignment
instead, as it may be overwritten later.  In either case, the (void *)
cast in the dma_map_single() call is no longer needed.

[1] See __DECONST() in sys/sys/cdefs.h in FreeBSD

Sponsored-by:  The FreeBSD Foundation
Signed-off-by: Bjoern A. Zeeb <bz@FreeBSD.ORG>
---
  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 2 +-
  drivers/net/wireless/intel/iwlwifi/mvm/ops.c      | 2 +-
  drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 4 ++--
  drivers/net/wireless/intel/iwlwifi/pcie/tx.c      | 4 ++--
  4 files changed, 6 insertions(+), 6 deletions(-)
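
For reference, a minimal sketch of the double-cast idiom; the DECONST
macro below is a stand-in mirroring FreeBSD's __DECONST() and is not
part of this patch:

  #include <stdint.h>

  /*
   * Stand-in mirroring FreeBSD's __DECONST(): the cast through an
   * integer type carries no qualifiers, so the const is laundered
   * without a "drops const qualifier" warning.
   */
  #define DECONST(type, var) ((type)(uintptr_t)(const void *)(var))

  static int read_it(const int *p)
  {
          /* int *q = (void *)p;          warns: drops const qualifier */
          int *r = (int *)(uintptr_t)p;   /* compiles cleanly */
          int *s = DECONST(int *, p);     /* same, via the macro */

          return *r + *s;
  }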

Comments

Kalle Valo March 1, 2021, 7:23 a.m. UTC | #1
"Bjoern A. Zeeb" <bz@FreeBSD.ORG> writes:

> In order to de-const variables, simply casting through (void *) is
> not enough: "cast from 'const .. *' to 'void *' drops const qualifier".
> Cast through (uintptr_t) as well [1] to make this compile on systems
> with stricter requirements.
> In addition, passing const void *data to dma_map_single() also
> drops the (const) qualifier.  De-constify the variable on assignment
> instead, as it may be overwritten later.  In either case, the (void *)
> cast in the dma_map_single() call is no longer needed.
>
> [1] See __DECONST() in sys/sys/cdefs.h in FreeBSD
>
> Sponsored-by:  The FreeBSD Foundation
> Signed-off-by: Bjoern A. Zeeb <bz@FreeBSD.ORG>

Why are we using the const in the first place? That sounds like a bug to
me.

BTW, your patches are hard to read due to excessive context; I guess you
are using a very large context value with diff? Our recommendation is to
use git with default values; see the wiki below for more info.
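
A sketch of what that means in practice (git defaults to three lines of
context; the -U option overrides it):

  # default: three lines of context around each change
  git format-patch -1
  # the same, with the default spelled out
  git format-patch -U3 -1
  # a large value such as -U100 yields the excessive context above
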
Bjoern A. Zeeb March 10, 2021, 12:31 p.m. UTC | #2
On 1 Mar 2021, at 7:23, Kalle Valo wrote:

> "Bjoern A. Zeeb" <bz@FreeBSD.ORG> writes:
>
>> In order to de-const variables, simply casting through (void *) is
>> not enough: "cast from 'const .. *' to 'void *' drops const qualifier".
>> Cast through (uintptr_t) as well [1] to make this compile on systems
>> with stricter requirements.
>> In addition, passing const void *data to dma_map_single() also
>> drops the (const) qualifier.  De-constify the variable on assignment
>> instead, as it may be overwritten later.  In either case, the (void *)
>> cast in the dma_map_single() call is no longer needed.
>>
>> [1] See __DECONST() in sys/sys/cdefs.h in FreeBSD
>>
>> Sponsored-by:  The FreeBSD Foundation
>> Signed-off-by: Bjoern A. Zeeb <bz@FreeBSD.ORG>
>
> Why are we using the const in the first place? That sounds like a bug
> to me.

For the he_cap cases I’ll leave this to Intel to answer, as they added the
comment that it is writable.


For the dma_map_single(.., DMA_TO_DEVICE) case, having const data is
probably okay.  This seems more (and I can only say from a distance, not
knowing Linux internals) like the Linux KPI doesn’t/cannot cater for it.
I am not sure why it would need to change anything behind the virtual
address along the way, and yet the argument is not “const”.
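
(Concretely, and only as a sketch from memory of the Linux headers:
dma_map_single() effectively takes a plain void * even for
DMA_TO_DEVICE, where the device only reads the buffer, hence the
de-constify on assignment in the patch below.)

  /* effective shape of the call, per include/linux/dma-mapping.h */
  dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                            size_t size, enum dma_data_direction dir);

  /* so the const buffer is laundered once, on assignment ... */
  void *data = (void *)(uintptr_t)cmddata[i];
  /* ... and the call itself no longer needs a cast: */
  phys_addr = dma_map_single(trans->dev, data, cmdlen[i],
                             DMA_TO_DEVICE);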


> BTW, your patches are hard to read due to excessive context; I guess you
> are using a very large context value with diff? Our recommendation is to
> use git with default values; see the wiki below for more info.

Sorry.  I’ll fix the three casts you mentioned in the other review and
send out v2 with less context for all of them.


Best Regards,
Bjoern

Patch

diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index baf7404c137d..3cad9c1d6814 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -3024,32 +3024,32 @@  static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
  static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm,
  					       struct ieee80211_vif *vif)
  {
  	struct ieee80211_supported_band *sband;
  	const struct ieee80211_sta_he_cap *he_cap;

  	if (vif->type != NL80211_IFTYPE_STATION)
  		return;

  	if (!mvm->cca_40mhz_workaround)
  		return;

  	/* decrement and check that we reached zero */
  	mvm->cca_40mhz_workaround--;
  	if (mvm->cca_40mhz_workaround)
  		return;

  	sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ];

  	sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;

  	he_cap = ieee80211_get_he_iftype_cap(sband,
  					     ieee80211_vif_type_p2p(vif));

  	if (he_cap) {
  		/* we know that ours is writable */
-		struct ieee80211_sta_he_cap *he = (void *)he_cap;
+		struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;

  		he->he_cap_elem.phy_cap_info[0] |=
  			IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
  	}
  }
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index ebed82c590e5..e79ca96e5844 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -149,74 +149,74 @@  static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
  static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
  				     struct iwl_rx_cmd_buffer *rxb)
  {
  	struct iwl_rx_packet *pkt = rxb_addr(rxb);
  	struct iwl_datapath_monitor_notif *notif = (void *)pkt->data;
  	struct ieee80211_supported_band *sband;
  	const struct ieee80211_sta_he_cap *he_cap;
  	struct ieee80211_vif *vif;

  	if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA))
  		return;

  	vif = iwl_mvm_get_vif_by_macid(mvm, notif->mac_id);
  	if (!vif || vif->type != NL80211_IFTYPE_STATION)
  		return;

  	if (!vif->bss_conf.chandef.chan ||
  	    vif->bss_conf.chandef.chan->band != NL80211_BAND_2GHZ ||
  	    vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40)
  		return;

  	if (!vif->bss_conf.assoc)
  		return;

  	/* this shouldn't happen *again*, ignore it */
  	if (mvm->cca_40mhz_workaround)
  		return;

  	/*
  	 * We'll decrement this on disconnect - so set to 2 since we'll
  	 * still have to disconnect from the current AP first.
  	 */
  	mvm->cca_40mhz_workaround = 2;

  	/*
  	 * This capability manipulation isn't really ideal, but it's the
  	 * easiest choice - otherwise we'd have to do some major changes
  	 * in mac80211 to support this, which isn't worth it. This does
  	 * mean that userspace may have outdated information, but that's
  	 * actually not an issue at all.
  	 */
  	sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ];

  	WARN_ON(!sband->ht_cap.ht_supported);
  	WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40));
  	sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;

  	he_cap = ieee80211_get_he_iftype_cap(sband,
  					     ieee80211_vif_type_p2p(vif));

  	if (he_cap) {
  		/* we know that ours is writable */
-		struct ieee80211_sta_he_cap *he = (void *)he_cap;
+		struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;

  		WARN_ON(!he->has_he);
  		WARN_ON(!(he->he_cap_elem.phy_cap_info[0] &
  				IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G));
  		he->he_cap_elem.phy_cap_info[0] &=
  			~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
  	}

  	ieee80211_disconnect(vif, true);
  }

  /**
   * enum iwl_rx_handler_context context for Rx handler
   * @RX_HANDLER_SYNC : this means that it will be called in the Rx path
   *	which can't acquire mvm->mutex.
   * @RX_HANDLER_ASYNC_LOCKED : If the handler needs to hold mvm->mutex
   *	(and only in this case!), it should be set as ASYNC. In that case,
   *	it will be called from a worker with mvm->mutex held.
   * @RX_HANDLER_ASYNC_UNLOCKED : in case the handler needs to lock the
   *	mutex itself, it will be called from a worker without mvm->mutex held.
   */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 4456abb9a074..afd3e69d629b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -18,241 +18,241 @@ 
  /*
   * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
   * @priv: device private data point
   * @cmd: a pointer to the ucode command structure
   *
   * The function returns < 0 values to indicate the operation
   * failed. On success, it returns the index (>= 0) of command in the
   * command queue.
   */
  int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
  			       struct iwl_host_cmd *cmd)
  {
  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
  	struct iwl_device_cmd *out_cmd;
  	struct iwl_cmd_meta *out_meta;
  	void *dup_buf = NULL;
  	dma_addr_t phys_addr;
  	int i, cmd_pos, idx;
  	u16 copy_size, cmd_size, tb0_size;
  	bool had_nocopy = false;
  	u8 group_id = iwl_cmd_groupid(cmd->id);
  	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
  	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
  	struct iwl_tfh_tfd *tfd;

  	copy_size = sizeof(struct iwl_cmd_header_wide);
  	cmd_size = sizeof(struct iwl_cmd_header_wide);

  	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
  		cmddata[i] = cmd->data[i];
  		cmdlen[i] = cmd->len[i];

  		if (!cmd->len[i])
  			continue;

  		/* need at least IWL_FIRST_TB_SIZE copied */
  		if (copy_size < IWL_FIRST_TB_SIZE) {
  			int copy = IWL_FIRST_TB_SIZE - copy_size;

  			if (copy > cmdlen[i])
  				copy = cmdlen[i];
  			cmdlen[i] -= copy;
  			cmddata[i] += copy;
  			copy_size += copy;
  		}

  		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
  			had_nocopy = true;
  			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
  				idx = -EINVAL;
  				goto free_dup_buf;
  			}
  		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
  			/*
  			 * This is also a chunk that isn't copied
  			 * to the static buffer so set had_nocopy.
  			 */
  			had_nocopy = true;

  			/* only allowed once */
  			if (WARN_ON(dup_buf)) {
  				idx = -EINVAL;
  				goto free_dup_buf;
  			}

  			dup_buf = kmemdup(cmddata[i], cmdlen[i],
  					  GFP_ATOMIC);
  			if (!dup_buf)
  				return -ENOMEM;
  		} else {
  			/* NOCOPY must not be followed by normal! */
  			if (WARN_ON(had_nocopy)) {
  				idx = -EINVAL;
  				goto free_dup_buf;
  			}
  			copy_size += cmdlen[i];
  		}
  		cmd_size += cmd->len[i];
  	}

  	/*
  	 * If any of the command structures end up being larger than the
  	 * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
  	 * separate TFDs, then we will need to increase the size of the buffers
  	 */
  	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
  		 "Command %s (%#x) is too large (%d bytes)\n",
  		 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
  		idx = -EINVAL;
  		goto free_dup_buf;
  	}

  	spin_lock_bh(&txq->lock);

  	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
  	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
  	memset(tfd, 0, sizeof(*tfd));

  	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
  		spin_unlock_bh(&txq->lock);

  		IWL_ERR(trans, "No space in command queue\n");
  		iwl_op_mode_cmd_queue_full(trans->op_mode);
  		idx = -ENOSPC;
  		goto free_dup_buf;
  	}

  	out_cmd = txq->entries[idx].cmd;
  	out_meta = &txq->entries[idx].meta;

  	/* re-initialize to NULL */
  	memset(out_meta, 0, sizeof(*out_meta));
  	if (cmd->flags & CMD_WANT_SKB)
  		out_meta->source = cmd;

  	/* set up the header */
  	out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
  	out_cmd->hdr_wide.group_id = group_id;
  	out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
  	out_cmd->hdr_wide.length =
  		cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
  	out_cmd->hdr_wide.reserved = 0;
  	out_cmd->hdr_wide.sequence =
  		cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
  					 INDEX_TO_SEQ(txq->write_ptr));

  	cmd_pos = sizeof(struct iwl_cmd_header_wide);
  	copy_size = sizeof(struct iwl_cmd_header_wide);

  	/* and copy the data that needs to be copied */
  	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
  		int copy;

  		if (!cmd->len[i])
  			continue;

  		/* copy everything if not nocopy/dup */
  		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
  					   IWL_HCMD_DFL_DUP))) {
  			copy = cmd->len[i];

  			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
  			cmd_pos += copy;
  			copy_size += copy;
  			continue;
  		}

  		/*
  		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
  		 * in total (for bi-directional DMA), but copy up to what
  		 * we can fit into the payload for debug dump purposes.
  		 */
  		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

  		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
  		cmd_pos += copy;

  		/* However, treat copy_size the proper way, we need it below */
  		if (copy_size < IWL_FIRST_TB_SIZE) {
  			copy = IWL_FIRST_TB_SIZE - copy_size;

  			if (copy > cmd->len[i])
  				copy = cmd->len[i];
  			copy_size += copy;
  		}
  	}

  	IWL_DEBUG_HC(trans,
  		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
  		     iwl_get_cmd_string(trans, cmd->id), group_id,
  		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
  		     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);

  	/* start the TFD with the minimum copy bytes */
  	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
  	memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
  	iwl_txq_gen2_set_tb(trans, tfd, iwl_txq_get_first_tb_dma(txq, idx),
  			    tb0_size);

  	/* map first command fragment, if any remains */
  	if (copy_size > tb0_size) {
  		phys_addr = dma_map_single(trans->dev,
  					   (u8 *)out_cmd + tb0_size,
  					   copy_size - tb0_size,
  					   DMA_TO_DEVICE);
  		if (dma_mapping_error(trans->dev, phys_addr)) {
  			idx = -ENOMEM;
  			iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
  			goto out;
  		}
  		iwl_txq_gen2_set_tb(trans, tfd, phys_addr,
  				    copy_size - tb0_size);
  	}

  	/* map the remaining (adjusted) nocopy/dup fragments */
  	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
-		const void *data = cmddata[i];
+		void *data = (void *)(uintptr_t)cmddata[i];

  		if (!cmdlen[i])
  			continue;
  		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
  					   IWL_HCMD_DFL_DUP)))
  			continue;
  		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
  			data = dup_buf;
-		phys_addr = dma_map_single(trans->dev, (void *)data,
+		phys_addr = dma_map_single(trans->dev, data,
  					   cmdlen[i], DMA_TO_DEVICE);
  		if (dma_mapping_error(trans->dev, phys_addr)) {
  			idx = -ENOMEM;
  			iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
  			goto out;
  		}
  		iwl_txq_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
  	}

  	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
  	out_meta->flags = cmd->flags;
  	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
  		kfree_sensitive(txq->entries[idx].free_buf);
  	txq->entries[idx].free_buf = dup_buf;

  	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

  	/* start timer if queue currently empty */
  	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
  		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

  	spin_lock(&trans_pcie->reg_lock);
  	/* Increment and update queue's write index */
  	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
  	iwl_txq_inc_wr_ptr(trans, txq);
  	spin_unlock(&trans_pcie->reg_lock);

  out:
  	spin_unlock_bh(&txq->lock);
  free_dup_buf:
  	if (idx < 0)
  		kfree(dup_buf);
  	return idx;
  }
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 6e02c8b294ac..e9997055f934 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -905,283 +905,283 @@  void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
  /*
   * iwl_pcie_enqueue_hcmd - enqueue a uCode command
   * @priv: device private data point
   * @cmd: a pointer to the ucode command structure
   *
   * The function returns < 0 values to indicate the operation
   * failed. On success, it returns the index (>= 0) of command in the
   * command queue.
   */
  int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
  			  struct iwl_host_cmd *cmd)
  {
  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
  	struct iwl_device_cmd *out_cmd;
  	struct iwl_cmd_meta *out_meta;
  	void *dup_buf = NULL;
  	dma_addr_t phys_addr;
  	int idx;
  	u16 copy_size, cmd_size, tb0_size;
  	bool had_nocopy = false;
  	u8 group_id = iwl_cmd_groupid(cmd->id);
  	int i, ret;
  	u32 cmd_pos;
  	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
  	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

  	if (WARN(!trans->wide_cmd_header &&
  		 group_id > IWL_ALWAYS_LONG_GROUP,
  		 "unsupported wide command %#x\n", cmd->id))
  		return -EINVAL;

  	if (group_id != 0) {
  		copy_size = sizeof(struct iwl_cmd_header_wide);
  		cmd_size = sizeof(struct iwl_cmd_header_wide);
  	} else {
  		copy_size = sizeof(struct iwl_cmd_header);
  		cmd_size = sizeof(struct iwl_cmd_header);
  	}

  	/* need one for the header if the first is NOCOPY */
  	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

  	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
  		cmddata[i] = cmd->data[i];
  		cmdlen[i] = cmd->len[i];

  		if (!cmd->len[i])
  			continue;

  		/* need at least IWL_FIRST_TB_SIZE copied */
  		if (copy_size < IWL_FIRST_TB_SIZE) {
  			int copy = IWL_FIRST_TB_SIZE - copy_size;

  			if (copy > cmdlen[i])
  				copy = cmdlen[i];
  			cmdlen[i] -= copy;
  			cmddata[i] += copy;
  			copy_size += copy;
  		}

  		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
  			had_nocopy = true;
  			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
  				idx = -EINVAL;
  				goto free_dup_buf;
  			}
  		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
  			/*
  			 * This is also a chunk that isn't copied
  			 * to the static buffer so set had_nocopy.
  			 */
  			had_nocopy = true;

  			/* only allowed once */
  			if (WARN_ON(dup_buf)) {
  				idx = -EINVAL;
  				goto free_dup_buf;
  			}

  			dup_buf = kmemdup(cmddata[i], cmdlen[i],
  					  GFP_ATOMIC);
  			if (!dup_buf)
  				return -ENOMEM;
  		} else {
  			/* NOCOPY must not be followed by normal! */
  			if (WARN_ON(had_nocopy)) {
  				idx = -EINVAL;
  				goto free_dup_buf;
  			}
  			copy_size += cmdlen[i];
  		}
  		cmd_size += cmd->len[i];
  	}

  	/*
  	 * If any of the command structures end up being larger than
  	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
  	 * allocated into separate TFDs, then we will need to
  	 * increase the size of the buffers.
  	 */
  	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
  		 "Command %s (%#x) is too large (%d bytes)\n",
  		 iwl_get_cmd_string(trans, cmd->id),
  		 cmd->id, copy_size)) {
  		idx = -EINVAL;
  		goto free_dup_buf;
  	}

  	spin_lock_bh(&txq->lock);

  	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
  		spin_unlock_bh(&txq->lock);

  		IWL_ERR(trans, "No space in command queue\n");
  		iwl_op_mode_cmd_queue_full(trans->op_mode);
  		idx = -ENOSPC;
  		goto free_dup_buf;
  	}

  	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
  	out_cmd = txq->entries[idx].cmd;
  	out_meta = &txq->entries[idx].meta;

  	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
  	if (cmd->flags & CMD_WANT_SKB)
  		out_meta->source = cmd;

  	/* set up the header */
  	if (group_id != 0) {
  		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
  		out_cmd->hdr_wide.group_id = group_id;
  		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
  		out_cmd->hdr_wide.length =
  			cpu_to_le16(cmd_size -
  				    sizeof(struct iwl_cmd_header_wide));
  		out_cmd->hdr_wide.reserved = 0;
  		out_cmd->hdr_wide.sequence =
  			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
  						 INDEX_TO_SEQ(txq->write_ptr));

  		cmd_pos = sizeof(struct iwl_cmd_header_wide);
  		copy_size = sizeof(struct iwl_cmd_header_wide);
  	} else {
  		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
  		out_cmd->hdr.sequence =
  			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
  						 INDEX_TO_SEQ(txq->write_ptr));
  		out_cmd->hdr.group_id = 0;

  		cmd_pos = sizeof(struct iwl_cmd_header);
  		copy_size = sizeof(struct iwl_cmd_header);
  	}

  	/* and copy the data that needs to be copied */
  	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
  		int copy;

  		if (!cmd->len[i])
  			continue;

  		/* copy everything if not nocopy/dup */
  		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
  					   IWL_HCMD_DFL_DUP))) {
  			copy = cmd->len[i];

  			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
  			cmd_pos += copy;
  			copy_size += copy;
  			continue;
  		}

  		/*
  		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
  		 * in total (for bi-directional DMA), but copy up to what
  		 * we can fit into the payload for debug dump purposes.
  		 */
  		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

  		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
  		cmd_pos += copy;

  		/* However, treat copy_size the proper way, we need it below */
  		if (copy_size < IWL_FIRST_TB_SIZE) {
  			copy = IWL_FIRST_TB_SIZE - copy_size;

  			if (copy > cmd->len[i])
  				copy = cmd->len[i];
  			copy_size += copy;
  		}
  	}

  	IWL_DEBUG_HC(trans,
  		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
  		     iwl_get_cmd_string(trans, cmd->id),
  		     group_id, out_cmd->hdr.cmd,
  		     le16_to_cpu(out_cmd->hdr.sequence),
  		     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);

  	/* start the TFD with the minimum copy bytes */
  	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
  	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
  	iwl_pcie_txq_build_tfd(trans, txq,
  			       iwl_txq_get_first_tb_dma(txq, idx),
  			       tb0_size, true);

  	/* map first command fragment, if any remains */
  	if (copy_size > tb0_size) {
  		phys_addr = dma_map_single(trans->dev,
  					   ((u8 *)&out_cmd->hdr) + tb0_size,
  					   copy_size - tb0_size,
  					   DMA_TO_DEVICE);
  		if (dma_mapping_error(trans->dev, phys_addr)) {
  			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
  					       txq->write_ptr);
  			idx = -ENOMEM;
  			goto out;
  		}

  		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
  				       copy_size - tb0_size, false);
  	}

  	/* map the remaining (adjusted) nocopy/dup fragments */
  	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
-		const void *data = cmddata[i];
+		void *data = (void *)(uintptr_t)cmddata[i];

  		if (!cmdlen[i])
  			continue;
  		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
  					   IWL_HCMD_DFL_DUP)))
  			continue;
  		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
  			data = dup_buf;
-		phys_addr = dma_map_single(trans->dev, (void *)data,
+		phys_addr = dma_map_single(trans->dev, data,
  					   cmdlen[i], DMA_TO_DEVICE);
  		if (dma_mapping_error(trans->dev, phys_addr)) {
  			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
  					       txq->write_ptr);
  			idx = -ENOMEM;
  			goto out;
  		}

  		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
  	}

  	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
  	out_meta->flags = cmd->flags;
  	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
  		kfree_sensitive(txq->entries[idx].free_buf);
  	txq->entries[idx].free_buf = dup_buf;

  	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

  	/* start timer if queue currently empty */
  	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
  		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

  	spin_lock(&trans_pcie->reg_lock);
  	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
  	if (ret < 0) {
  		idx = ret;
  		goto unlock_reg;
  	}

  	/* Increment and update queue's write index */
  	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
  	iwl_pcie_txq_inc_wr_ptr(trans, txq);

   unlock_reg:
  	spin_unlock(&trans_pcie->reg_lock);
   out:
  	spin_unlock_bh(&txq->lock);
   free_dup_buf:
  	if (idx < 0)
  		kfree(dup_buf);
  	return idx;
  }

  /*
   * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
   * @rxb: Rx buffer to reclaim
   */