Merge branch 'for-john' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next
Conflicts:
	drivers/net/wireless/iwlwifi/pcie/trans.c
commit dfbebe1442
17 changed files with 183 additions and 112 deletions
@@ -133,12 +133,3 @@ config IWLWIFI_P2P
 	  support when it is loaded.
 
 	  Say Y only if you want to experiment with P2P.
-
-config IWLWIFI_EXPERIMENTAL_MFP
-	bool "support MFP (802.11w) even if uCode doesn't advertise"
-	depends on IWLWIFI
-	help
-	  This option enables experimental MFP (802.11W) support
-	  even if the microcode doesn't advertise it.
-
-	  Say Y only if you want to experiment with MFP.
@@ -176,8 +176,8 @@ int iwlagn_hw_valid_rtc_data_addr(u32 addr);
 /* lib */
 int iwlagn_send_tx_power(struct iwl_priv *priv);
 void iwlagn_temperature(struct iwl_priv *priv);
-int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
-void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
+int iwlagn_txfifo_flush(struct iwl_priv *priv);
+void iwlagn_dev_txfifo_flush(struct iwl_priv *priv);
 int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
 int iwl_send_statistics_request(struct iwl_priv *priv,
 				u8 flags, bool clear);
@@ -986,8 +986,7 @@ struct iwl_rem_sta_cmd {
 
 #define IWL_AGG_TX_QUEUE_MSK	cpu_to_le32(0xffc00)
 
-#define IWL_DROP_SINGLE		0
-#define IWL_DROP_ALL		(BIT(IWL_RXON_CTX_BSS) | BIT(IWL_RXON_CTX_PAN))
+#define IWL_DROP_ALL		BIT(1)
 
 /*
  * REPLY_TXFIFO_FLUSH = 0x1e(command and response)
@@ -1004,14 +1003,14 @@ struct iwl_rem_sta_cmd {
  * the flush operation ends when both the scheduler DMA done and TXFIFO empty
  * are set.
  *
- * @fifo_control: bit mask for which queues to flush
+ * @queue_control: bit mask for which queues to flush
  * @flush_control: flush controls
  *	0: Dump single MSDU
  *	1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable.
  *	2: Dump all FIFO
  */
 struct iwl_txfifo_flush_cmd {
-	__le32 fifo_control;
+	__le32 queue_control;
 	__le16 flush_control;
 	__le16 reserved;
 } __packed;
@@ -2101,7 +2101,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
 	if (iwl_is_rfkill(priv))
 		return -EFAULT;
 
-	iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
+	iwlagn_dev_txfifo_flush(priv);
 
 	return count;
 }
@@ -136,7 +136,7 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
  * 1. acquire mutex before calling
  * 2. make sure rf is on and not in exit state
  */
-int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
+int iwlagn_txfifo_flush(struct iwl_priv *priv)
 {
 	struct iwl_txfifo_flush_cmd flush_cmd;
 	struct iwl_host_cmd cmd = {
@@ -146,35 +146,34 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
 		.data = { &flush_cmd, },
 	};
 
-	might_sleep();
-
 	memset(&flush_cmd, 0, sizeof(flush_cmd));
-	if (flush_control & BIT(IWL_RXON_CTX_BSS))
-		flush_cmd.fifo_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
-				IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
-				IWL_SCD_MGMT_MSK;
-	if ((flush_control & BIT(IWL_RXON_CTX_PAN)) &&
-	    (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
-		flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK |
-				IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK |
-				IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
-				IWL_PAN_SCD_MULTICAST_MSK;
+	flush_cmd.queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
+				  IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
+				  IWL_SCD_MGMT_MSK;
+	if ((priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
+		flush_cmd.queue_control |= IWL_PAN_SCD_VO_MSK |
+					   IWL_PAN_SCD_VI_MSK |
+					   IWL_PAN_SCD_BE_MSK |
+					   IWL_PAN_SCD_BK_MSK |
+					   IWL_PAN_SCD_MGMT_MSK |
+					   IWL_PAN_SCD_MULTICAST_MSK;
 
 	if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE)
-		flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
+		flush_cmd.queue_control |= IWL_AGG_TX_QUEUE_MSK;
 
-	IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
-		       flush_cmd.fifo_control);
-	flush_cmd.flush_control = cpu_to_le16(flush_control);
+	IWL_DEBUG_INFO(priv, "queue control: 0x%x\n",
+		       flush_cmd.queue_control);
+	flush_cmd.flush_control = cpu_to_le16(IWL_DROP_ALL);
 
 	return iwl_dvm_send_cmd(priv, &cmd);
 }
 
-void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
+void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
 {
 	mutex_lock(&priv->mutex);
 	ieee80211_stop_queues(priv->hw);
-	if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
+	if (iwlagn_txfifo_flush(priv)) {
 		IWL_ERR(priv, "flush request fail\n");
 		goto done;
 	}
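Note: the net effect of the two hunks above is that the flush API loses its per-context flush_control parameter and the command now always requests a full flush. Caller-side, the change looks like this (both lines are taken from other hunks in this commit):

	/* before: every caller passed a drop mask */
	iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);

	/* after: the drop-all behaviour is implied */
	iwlagn_dev_txfifo_flush(priv);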
@@ -168,10 +168,8 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
 	hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
 		     IEEE80211_HW_SUPPORTS_STATIC_SMPS;
 
-#ifndef CONFIG_IWLWIFI_EXPERIMENTAL_MFP
 	/* enable 11w if the uCode advertise */
 	if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
-#endif /* !CONFIG_IWLWIFI_EXPERIMENTAL_MFP */
 		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
 
 	hw->sta_data_size = sizeof(struct iwl_station_priv);
@@ -1019,7 +1017,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
 	 */
 	if (drop) {
 		IWL_DEBUG_MAC80211(priv, "send flush command\n");
-		if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
+		if (iwlagn_txfifo_flush(priv)) {
 			IWL_ERR(priv, "flush request fail\n");
 			goto done;
 		}
@@ -511,7 +511,7 @@ static void iwl_bg_tx_flush(struct work_struct *work)
 		return;
 
 	IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
-	iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
+	iwlagn_dev_txfifo_flush(priv);
 }
 
 /*
@@ -1204,7 +1204,7 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
 		return -EINVAL;
 	}
 
-	IWL_INFO(priv, "Device SKU: 0x%X\n", priv->eeprom_data->sku);
+	IWL_DEBUG_INFO(priv, "Device SKU: 0x%X\n", priv->eeprom_data->sku);
 
 	priv->hw_params.tx_chains_num =
 		num_of_ant(priv->eeprom_data->valid_tx_ant);
@@ -1214,9 +1214,9 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
 	priv->hw_params.rx_chains_num =
 		num_of_ant(priv->eeprom_data->valid_rx_ant);
 
-	IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
-		 priv->eeprom_data->valid_tx_ant,
-		 priv->eeprom_data->valid_rx_ant);
+	IWL_DEBUG_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
+		       priv->eeprom_data->valid_tx_ant,
+		       priv->eeprom_data->valid_rx_ant);
 
 	return 0;
 }
@@ -1231,7 +1231,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 	struct iwl_op_mode *op_mode;
 	u16 num_mac;
 	u32 ucode_flags;
-	struct iwl_trans_config trans_cfg;
+	struct iwl_trans_config trans_cfg = {};
 	static const u8 no_reclaim_cmds[] = {
 		REPLY_RX_PHY_CMD,
 		REPLY_RX_MPDU_CMD,
@@ -1507,10 +1507,6 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
 
 	iwl_tt_exit(priv);
 
-	/*This will stop the queues, move the device to low power state */
-	priv->ucode_loaded = false;
-	iwl_trans_stop_device(priv->trans);
-
 	kfree(priv->eeprom_blob);
 	iwl_free_eeprom_data(priv->eeprom_data);
 
@@ -1926,8 +1922,6 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
 	 * commands by clearing the ready bit */
 	clear_bit(STATUS_READY, &priv->status);
 
-	wake_up(&priv->trans->wait_command_queue);
-
 	if (!ondemand) {
 		/*
 		 * If firmware keep reloading, then it indicate something
@@ -631,8 +631,6 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
 	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
 		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
 			test_bit(STATUS_RF_KILL_HW, &priv->status));
-	else
-		wake_up(&priv->trans->wait_command_queue);
 	return 0;
 }
 
@@ -1075,14 +1075,11 @@ static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
 
 static void iwlagn_set_tx_status(struct iwl_priv *priv,
 				 struct ieee80211_tx_info *info,
-				 struct iwlagn_tx_resp *tx_resp,
-				 bool is_agg)
+				 struct iwlagn_tx_resp *tx_resp)
 {
 	u16 status = le16_to_cpu(tx_resp->status.status);
 
 	info->status.rates[0].count = tx_resp->failure_frame + 1;
-	if (is_agg)
-		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
 	info->flags |= iwl_tx_status_to_mac80211(status);
 	iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
 				    info);
@@ -1231,7 +1228,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 		if (is_agg && !iwl_is_tx_success(status))
 			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
 		iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
-				     tx_resp, is_agg);
+				     tx_resp);
 		if (!is_agg)
 			iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
 
@@ -254,7 +254,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
 	int ret;
 	int i;
 
-	iwl_trans_fw_alive(priv->trans);
+	iwl_trans_fw_alive(priv->trans, 0);
 
 	if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN &&
 	    priv->eeprom_data->sku & EEPROM_SKU_CAP_IPAN_ENABLE) {
@@ -306,7 +306,7 @@ TRACE_EVENT(iwlwifi_dev_rx_data,
 		memcpy(__get_dynamic_array(data),
 		       ((u8 *)rxbuf) + offs, len - offs);
 	),
-	TP_printk("[%s] TX frame data", __get_str(dev))
+	TP_printk("[%s] RX frame data", __get_str(dev))
 );
 
 #undef TRACE_SYSTEM
@@ -889,8 +889,8 @@ int iwl_eeprom_check_version(struct iwl_eeprom_data *data,
 {
 	if (data->eeprom_version >= trans->cfg->eeprom_ver ||
 	    data->calib_version >= trans->cfg->eeprom_calib_ver) {
-		IWL_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
-			 data->eeprom_version, data->calib_version);
+		IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
+			       data->eeprom_version, data->calib_version);
 		return 0;
 	}
 
@@ -221,14 +221,21 @@ struct iwl_device_cmd {
 /**
  * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
  *
- * IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
+ * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
  *	ring. The transport layer doesn't map the command's buffer to DMA, but
  *	rather copies it to an previously allocated DMA buffer. This flag tells
  *	the transport layer not to copy the command, but to map the existing
- *	buffer. This can save memcpy and is worth with very big comamnds.
+ *	buffer (that is passed in) instead. This saves the memcpy and allows
+ *	commands that are bigger than the fixed buffer to be submitted.
+ *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
+ * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
+ *	chunk internally and free it again after the command completes. This
+ *	can (currently) be used only once per command.
+ *	Note that a TFD entry after a DUP one cannot be a normal copied one.
  */
 enum iwl_hcmd_dataflag {
 	IWL_HCMD_DFL_NOCOPY	= BIT(0),
+	IWL_HCMD_DFL_DUP	= BIT(1),
 };
 
 /**
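Note: IWL_HCMD_DFL_DUP is new in this merge. A minimal caller sketch, assuming an arbitrary command; the function name and payload here are illustrative and not taken from this commit:

	static int example_send_dup_chunk(struct iwl_trans *trans,
					  const void *buf, u16 len)
	{
		struct iwl_host_cmd hcmd = {
			.id = REPLY_ECHO,	/* any host command ID */
			.data = { buf, },
			.len = { len, },
			/* the transport kmemdup()s the chunk and frees it
			 * after completion, so buf may be short-lived */
			.dataflags = { IWL_HCMD_DFL_DUP, },
		};

		return iwl_trans_send_cmd(trans, &hcmd);
	}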
@@ -348,14 +355,17 @@ struct iwl_trans;
  * @start_fw: allocates and inits all the resources for the transport
  *	layer. Also kick a fw image.
  *	May sleep
- * @fw_alive: called when the fw sends alive notification
+ * @fw_alive: called when the fw sends alive notification. If the fw provides
+ *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
  *	May sleep
  * @stop_device:stops the whole device (embedded CPU put to reset)
  *	May sleep
  * @wowlan_suspend: put the device into the correct mode for WoWLAN during
  *	suspend. This is optional, if not implemented WoWLAN will not be
  *	supported. This callback may sleep.
- * @send_cmd:send a host command
+ * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
+ *	If RFkill is asserted in the middle of a SYNC host command, it must
+ *	return -ERFKILL straight away.
  *	May sleep only if CMD_SYNC is set
  * @tx: send an skb
  *	Must be atomic
@@ -385,7 +395,7 @@ struct iwl_trans_ops {
 	int (*start_hw)(struct iwl_trans *iwl_trans);
 	void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving);
 	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw);
-	void (*fw_alive)(struct iwl_trans *trans);
+	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
 	void (*stop_device)(struct iwl_trans *trans);
 
 	void (*wowlan_suspend)(struct iwl_trans *trans);
@@ -438,7 +448,6 @@ enum iwl_trans_state {
  *	Set during transport allocation.
  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
  * @pm_support: set to true in start_hw if link pm is supported
- * @wait_command_queue: the wait_queue for SYNC host commands
  * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
  *	The user should use iwl_trans_{alloc,free}_tx_cmd.
  * @dev_cmd_headroom: room needed for the transport's private use before the
@@ -465,8 +474,6 @@ struct iwl_trans {
 
 	bool pm_support;
 
-	wait_queue_head_t wait_command_queue;
-
 	/* The following fields are internal only */
 	struct kmem_cache *dev_cmd_pool;
 	size_t dev_cmd_headroom;
@@ -508,13 +515,13 @@ static inline void iwl_trans_stop_hw(struct iwl_trans *trans,
 	trans->state = IWL_TRANS_NO_FW;
 }
 
-static inline void iwl_trans_fw_alive(struct iwl_trans *trans)
+static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
 {
 	might_sleep();
 
 	trans->state = IWL_TRANS_FW_ALIVE;
 
-	trans->ops->fw_alive(trans);
+	trans->ops->fw_alive(trans, scd_addr);
 }
 
 static inline int iwl_trans_start_fw(struct iwl_trans *trans,
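Note: only the PCIe transport actually consumes scd_addr in this merge; the DVM op mode does not yet extract the SCD base address from the alive notification, so it passes 0, which the transport treats as "skip the cross-check" (see the WARN_ON added to iwl_tx_start() further down). Usage as it appears in the ucode.c hunk above:

	iwl_trans_fw_alive(priv->trans, 0);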
@@ -186,6 +186,8 @@ struct iwl_pcie_tx_queue_entry {
 	struct iwl_device_cmd *cmd;
 	struct iwl_device_cmd *copy_cmd;
 	struct sk_buff *skb;
+	/* buffer to free after command completes */
+	const void *free_buf;
 	struct iwl_cmd_meta meta;
 };
 
@@ -268,6 +270,8 @@ struct iwl_trans_pcie {
 
 	bool ucode_write_complete;
 	wait_queue_head_t ucode_write_waitq;
+	wait_queue_head_t wait_command_queue;
+
 	unsigned long status;
 	u8 cmd_queue;
 	u8 cmd_fifo;
@@ -286,10 +290,14 @@ struct iwl_trans_pcie {
 /*****************************************************
 * DRIVER STATUS FUNCTIONS
 ******************************************************/
-#define STATUS_HCMD_ACTIVE	0
-#define STATUS_DEVICE_ENABLED	1
-#define STATUS_TPOWER_PMI	2
-#define STATUS_INT_ENABLED	3
+enum {
+	STATUS_HCMD_ACTIVE,
+	STATUS_DEVICE_ENABLED,
+	STATUS_TPOWER_PMI,
+	STATUS_INT_ENABLED,
+	STATUS_RFKILL,
+	STATUS_FW_ERROR,
+};
 
 #define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
 	((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
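Note: the enum values are bit numbers, not masks, because trans_pcie->status is driven by the atomic bitops. Typical usage, as it appears in later hunks of this commit:

	set_bit(STATUS_RFKILL, &trans_pcie->status);

	if (test_bit(STATUS_RFKILL, &trans_pcie->status))
		return -ERFKILL;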
@@ -346,6 +354,7 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 			  enum dma_data_direction dma_dir);
 int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
 			 struct sk_buff_head *skbs);
+void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id);
 int iwl_queue_space(const struct iwl_queue *q);
 
 /*****************************************************
@@ -199,7 +199,6 @@ static void iwl_rx_queue_restock(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-	struct list_head *element;
 	struct iwl_rx_mem_buffer *rxb;
 	unsigned long flags;
 
@@ -221,9 +220,9 @@ static void iwl_rx_queue_restock(struct iwl_trans *trans)
 		BUG_ON(rxb && rxb->page);
 
 		/* Get next free Rx buffer, remove from free list */
-		element = rxq->rx_free.next;
-		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-		list_del(element);
+		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
+				       list);
+		list_del(&rxb->list);
 
 		/* Point to Rx buffer via next RBD in circular buffer */
 		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(rxb->page_dma);
|
||||||
{
|
{
|
||||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||||
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
|
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
|
||||||
struct list_head *element;
|
|
||||||
struct iwl_rx_mem_buffer *rxb;
|
struct iwl_rx_mem_buffer *rxb;
|
||||||
struct page *page;
|
struct page *page;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
@@ -308,10 +306,9 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
 			__free_pages(page, trans_pcie->rx_page_order);
 			return;
 		}
-		element = rxq->rx_used.next;
-		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-		list_del(element);
+		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
+				       list);
+		list_del(&rxb->list);
 
 		spin_unlock_irqrestore(&rxq->lock, flags);
 
 		BUG_ON(rxb->page);
|
||||||
/* The original command isn't needed any more */
|
/* The original command isn't needed any more */
|
||||||
kfree(txq->entries[cmd_index].copy_cmd);
|
kfree(txq->entries[cmd_index].copy_cmd);
|
||||||
txq->entries[cmd_index].copy_cmd = NULL;
|
txq->entries[cmd_index].copy_cmd = NULL;
|
||||||
|
/* nor is the duplicated part of the command */
|
||||||
|
kfree(txq->entries[cmd_index].free_buf);
|
||||||
|
txq->entries[cmd_index].free_buf = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@@ -565,24 +565,27 @@ static void iwl_rx_handle(struct iwl_trans *trans)
  */
 static void iwl_irq_handle_error(struct iwl_trans *trans)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
 	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
 	if (trans->cfg->internal_wimax_coex &&
 	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
 			    APMS_CLK_VAL_MRB_FUNC_MODE) ||
 	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
 			    APMG_PS_CTRL_VAL_RESET_REQ))) {
-		struct iwl_trans_pcie *trans_pcie =
-			IWL_TRANS_GET_PCIE_TRANS(trans);
-
 		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
 		iwl_op_mode_wimax_active(trans->op_mode);
-		wake_up(&trans->wait_command_queue);
+		wake_up(&trans_pcie->wait_command_queue);
 		return;
 	}
 
 	iwl_dump_csr(trans);
 	iwl_dump_fh(trans, NULL);
 
+	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
+	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+	wake_up(&trans_pcie->wait_command_queue);
+
 	iwl_op_mode_nic_error(trans->op_mode);
 }
 
@@ -676,6 +679,16 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
 		isr_stats->rfkill++;
 
 		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+		if (hw_rfkill) {
+			set_bit(STATUS_RFKILL, &trans_pcie->status);
+			if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
+					       &trans_pcie->status))
+				IWL_DEBUG_RF_KILL(trans,
+					"Rfkill while SYNC HCMD in flight\n");
+			wake_up(&trans_pcie->wait_command_queue);
+		} else {
+			clear_bit(STATUS_RFKILL, &trans_pcie->status);
+		}
 
 		handled |= CSR_INT_BIT_RF_KILL;
 	}
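Note: this block closes a race where RFkill is asserted while a SYNC host command is in flight. The waiter in iwl_send_cmd_sync() sleeps until STATUS_HCMD_ACTIVE clears, so the interrupt path must clear that bit and wake it explicitly; condensed from the hunk above:

	if (test_and_clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status))
		IWL_DEBUG_RF_KILL(trans, "Rfkill while SYNC HCMD in flight\n");
	wake_up(&trans_pcie->wait_command_queue);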
@@ -442,10 +442,10 @@ static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 	return 0;
 }
 
-/**
+/*
  * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
  */
-static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
+void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
@@ -496,6 +496,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
 	for (i = 0; i < txq->q.n_window; i++) {
 		kfree(txq->entries[i].cmd);
 		kfree(txq->entries[i].copy_cmd);
+		kfree(txq->entries[i].free_buf);
 	}
 
 	/* De-alloc circular buffer of TFDs */
|
||||||
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
|
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
|
||||||
const struct fw_img *fw)
|
const struct fw_img *fw)
|
||||||
{
|
{
|
||||||
|
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||||
int ret;
|
int ret;
|
||||||
bool hw_rfkill;
|
bool hw_rfkill;
|
||||||
|
|
||||||
|
@@ -1032,6 +1034,8 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 		return -EIO;
 	}
 
+	clear_bit(STATUS_FW_ERROR, &trans_pcie->status);
+
 	iwl_enable_rfkill_int(trans);
 
 	/* If platform's RF_KILL switch is NOT set to KILL */
@@ -1076,7 +1080,7 @@ static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
 	iwl_write_prph(trans, SCD_TXFACT, mask);
 }
 
-static void iwl_tx_start(struct iwl_trans *trans)
+static void iwl_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 a;
@@ -1089,6 +1093,10 @@ static void iwl_tx_start(struct iwl_trans *trans)
 
 	trans_pcie->scd_base_addr =
 		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
+
+	WARN_ON(scd_base_addr != 0 &&
+		scd_base_addr != trans_pcie->scd_base_addr);
+
 	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
 	/* reset conext data memory */
 	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
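Note: the WARN_ON() is a consistency check, not flow control: if the firmware-reported SCD base address disagrees with the SCD_SRAM_BASE_ADDR register, we get a one-time backtrace but keep using the register value; scd_base_addr == 0 (what the DVM op mode passes) skips the check. The call chain introduced by this merge, as a sketch:

	/*
	 * iwl_trans_fw_alive(trans, scd_addr)       op mode, on alive notif
	 *   trans->ops->fw_alive(trans, scd_addr)   iwl_trans_pcie_fw_alive()
	 *     iwl_tx_start(trans, scd_addr)         cross-checks the address
	 */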
@@ -1134,10 +1142,10 @@ static void iwl_tx_start(struct iwl_trans *trans)
 			   APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
 }
 
-static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
+static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
 {
 	iwl_reset_ict(trans);
-	iwl_tx_start(trans);
+	iwl_tx_start(trans, scd_addr);
 }
 
 /**
@@ -1243,6 +1251,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
 	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
 	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+	clear_bit(STATUS_RFKILL, &trans_pcie->status);
 }
 
 static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
@@ -2166,12 +2175,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		goto out_pci_release_regions;
 	}
 
-	dev_info(&pdev->dev, "pci_resource_len = 0x%08llx\n",
-		 (unsigned long long) pci_resource_len(pdev, 0));
-	dev_info(&pdev->dev, "pci_resource_base = %p\n", trans_pcie->hw_base);
-
-	dev_info(&pdev->dev, "HW Revision ID = 0x%X\n", pdev->revision);
-
 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
 	 * PCI Tx retries from interfering with C3 CPU state */
 	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
@@ -2197,7 +2200,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	}
 
 	/* Initialize the wait queue for commands */
-	init_waitqueue_head(&trans->wait_command_queue);
+	init_waitqueue_head(&trans_pcie->wait_command_queue);
 	spin_lock_init(&trans->reg_lock);
 
 	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
@@ -494,6 +494,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
 	_iwl_write_targ_mem_dwords(trans, stts_addr,
 				   zero_val, ARRAY_SIZE(zero_val));
 
+	iwl_tx_queue_unmap(trans, txq_id);
+
 	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
 }
 
@@ -515,8 +517,9 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	struct iwl_queue *q = &txq->q;
 	struct iwl_device_cmd *out_cmd;
 	struct iwl_cmd_meta *out_meta;
+	void *dup_buf = NULL;
 	dma_addr_t phys_addr;
-	u32 idx;
+	int idx;
 	u16 copy_size, cmd_size;
 	bool had_nocopy = false;
 	int i;
@@ -533,10 +536,33 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 			continue;
 		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
 			had_nocopy = true;
+			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
+				idx = -EINVAL;
+				goto free_dup_buf;
+			}
+		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
+			/*
+			 * This is also a chunk that isn't copied
+			 * to the static buffer so set had_nocopy.
+			 */
+			had_nocopy = true;
+
+			/* only allowed once */
+			if (WARN_ON(dup_buf)) {
+				idx = -EINVAL;
+				goto free_dup_buf;
+			}
+
+			dup_buf = kmemdup(cmd->data[i], cmd->len[i],
+					  GFP_ATOMIC);
+			if (!dup_buf)
+				return -ENOMEM;
 		} else {
 			/* NOCOPY must not be followed by normal! */
-			if (WARN_ON(had_nocopy))
-				return -EINVAL;
+			if (WARN_ON(had_nocopy)) {
+				idx = -EINVAL;
+				goto free_dup_buf;
+			}
 			copy_size += cmd->len[i];
 		}
 		cmd_size += cmd->len[i];
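Note: GFP_ATOMIC is a deliberate choice so the duplication stays safe even if the enqueue path is entered from atomic context; kmemdup() itself behaves like the following sketch (the helper name is made up, the real function lives in mm/util.c):

	static void *kmemdup_sketch(const void *src, size_t len, gfp_t gfp)
	{
		void *p = kmalloc(len, gfp);

		if (p)
			memcpy(p, src, len);
		return p;
	}

The WARN_ON(dup_buf) above enforces the documented one-DUP-chunk-per-command limit.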
@@ -551,8 +577,10 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
 		 "Command %s (%#x) is too large (%d bytes)\n",
 		 trans_pcie_get_cmd_string(trans_pcie, cmd->id),
-		 cmd->id, copy_size))
-		return -EINVAL;
+		 cmd->id, copy_size)) {
+		idx = -EINVAL;
+		goto free_dup_buf;
+	}
 
 	spin_lock_bh(&txq->lock);
 
@@ -561,7 +589,8 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 
 		IWL_ERR(trans, "No space in command queue\n");
 		iwl_op_mode_cmd_queue_full(trans->op_mode);
-		return -ENOSPC;
+		idx = -ENOSPC;
+		goto free_dup_buf;
 	}
 
 	idx = get_cmd_index(q, q->write_ptr);
@@ -585,7 +614,8 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
 		if (!cmd->len[i])
 			continue;
-		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
+		if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+					 IWL_HCMD_DFL_DUP))
 			break;
 		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
 		cmd_pos += cmd->len[i];
@@ -627,11 +657,16 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
 
 	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+		const void *data = cmd->data[i];
+
 		if (!cmd->len[i])
 			continue;
-		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
+		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+					   IWL_HCMD_DFL_DUP)))
 			continue;
-		phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
+		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
+			data = dup_buf;
+		phys_addr = dma_map_single(trans->dev, (void *)data,
 					   cmd->len[i], DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(trans->dev, phys_addr)) {
 			iwl_unmap_tfd(trans, out_meta,
@@ -646,6 +681,9 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	}
 
 	out_meta->flags = cmd->flags;
+	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
+		kfree(txq->entries[idx].free_buf);
+	txq->entries[idx].free_buf = dup_buf;
 
 	txq->need_update = 1;
 
@@ -662,6 +700,9 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 
 out:
 	spin_unlock_bh(&txq->lock);
+ free_dup_buf:
+	if (idx < 0)
+		kfree(dup_buf);
 	return idx;
 }
 
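Note: every early error return in iwl_enqueue_hcmd() is rewritten as "idx = -ERRNO; goto free_dup_buf;" so the duplicated chunk cannot leak: idx doubles as the negative error code or the non-negative ring index, and ownership of dup_buf passes to txq->entries[idx].free_buf only on success. The recurring pattern:

	if (WARN_ON(dup_buf)) {
		idx = -EINVAL;
		goto free_dup_buf;	/* kfree(dup_buf) only if idx < 0 */
	}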
@@ -786,7 +827,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
 		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
 			       trans_pcie_get_cmd_string(trans_pcie,
 							 cmd->hdr.cmd));
-		wake_up(&trans->wait_command_queue);
+		wake_up(&trans_pcie->wait_command_queue);
 	}
 
 	meta->flags = 0;
@@ -845,7 +886,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 		return ret;
 	}
 
-	ret = wait_event_timeout(trans->wait_command_queue,
+	ret = wait_event_timeout(trans_pcie->wait_command_queue,
 				 !test_bit(STATUS_HCMD_ACTIVE,
 					   &trans_pcie->status),
 				 HOST_COMPLETE_TIMEOUT);
@@ -874,6 +915,19 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 		}
 	}
 
+	if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
+		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
+			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+		ret = -EIO;
+		goto cancel;
+	}
+
+	if (test_bit(STATUS_RFKILL, &trans_pcie->status)) {
+		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
+		ret = -ERFKILL;
+		goto cancel;
+	}
+
 	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
 		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
 			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
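Note: with these two checks, a CMD_SYNC caller can distinguish a completed command, an RFkill abort, and a firmware error. A hedged usage sketch; the wrapper is illustrative and not part of this commit:

	static int example_sync_cmd(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
	{
		int ret = iwl_trans_send_cmd(trans, cmd);

		if (ret == -ERFKILL)	/* RFkill asserted while waiting */
			IWL_DEBUG_RF_KILL(trans, "cmd aborted by RFkill\n");
		else if (ret == -EIO)	/* firmware error woke the waiter */
			IWL_ERR(trans, "cmd failed on FW error\n");
		return ret;
	}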
@@ -905,9 +959,18 @@ cancel:
 
 int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (test_bit(STATUS_FW_ERROR, &trans_pcie->status))
+		return -EIO;
+
+	if (test_bit(STATUS_RFKILL, &trans_pcie->status))
+		return -ERFKILL;
+
 	if (cmd->flags & CMD_ASYNC)
 		return iwl_send_cmd_async(trans, cmd);
 
+	/* We still can fail on RFKILL that can be asserted while we wait */
 	return iwl_send_cmd_sync(trans, cmd);
 }
 