ath10k: Handle mgmt tx completion event

WCN3990 transmits mgmt frames by reference via WMI.
A transmitted mgmt frame must be freed only after the
firmware reports the over-the-air tx status of the
corresponding frame.

Handle the WMI mgmt tx completion event and free
the corresponding mgmt frame.

CRs-Fixed: 2181843
Change-Id: I07135230e39aecff3f646d3eab2b6ab5272cb21b
Signed-off-by: Rakesh Pillai <pillair@codeaurora.org>
Author: Rakesh Pillai <pillair@codeaurora.org>
Date:   2018-01-29 11:19:32 +05:30
Parent: 89c988e117
Commit: 80b468d70d

8 changed files with 184 additions and 2 deletions
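Before the per-file diffs, a quick sketch of the mechanism the patch implements: each by-ref mgmt frame is tracked in an idr so the WMI completion event can find it again by descriptor id. This is a minimal illustrative sketch using the kernel idr API, not the patch itself; struct pending_mgmt and mgmt_tx_track are made-up names, while the lock and the mgmt_max_num_pending_tx limit mirror the patch.

/* Illustrative sketch (not part of the patch): allocate a desc_id for
 * a mgmt frame handed to firmware. idr_alloc() returns an id in
 * [0, mgmt_max_num_pending_tx) on success, or -ENOMEM/-ENOSPC.
 */
struct pending_mgmt {			/* mirrors ath10k_mgmt_tx_pkt_addr */
	void *vaddr;			/* the msdu skb */
	dma_addr_t paddr;		/* its DMA-mapped address */
};

static int mgmt_tx_track(struct ath10k_wmi *wmi, struct sk_buff *msdu,
			 dma_addr_t paddr)
{
	struct pending_mgmt *p = kmalloc(sizeof(*p), GFP_ATOMIC);
	int id;

	if (!p)
		return -ENOMEM;
	p->vaddr = msdu;
	p->paddr = paddr;

	spin_lock_bh(&wmi->mgmt_tx_lock);
	id = idr_alloc(&wmi->mgmt_pending_tx, p, 0,
		       wmi->mgmt_max_num_pending_tx, GFP_ATOMIC);
	spin_unlock_bh(&wmi->mgmt_tx_lock);

	if (id < 0)
		kfree(p);	/* no free slot; caller drops the frame */
	return id;		/* on success, sent as cmd->hdr.desc_id */
}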

drivers/net/wireless/ath/ath10k/core.c

@@ -1713,6 +1713,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
WMI_STAT_PEER;
ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC;
break;
case ATH10K_FW_WMI_OP_VERSION_10_4:
ar->max_num_peers = TARGET_10_4_NUM_PEERS;

drivers/net/wireless/ath/ath10k/core.h

@@ -174,6 +174,10 @@ struct ath10k_wmi {
const struct wmi_ops *ops;
const struct wmi_peer_flags_map *peer_flags;
u32 mgmt_max_num_pending_tx;
struct idr mgmt_pending_tx;
/* Protects access to mgmt_pending_tx, mgmt_max_num_pending_tx */
spinlock_t mgmt_tx_lock;
u32 num_mem_chunks;
u32 rx_decap_mode;
struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];

drivers/net/wireless/ath/ath10k/hw.h

@@ -625,6 +625,8 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
#define TARGET_TLV_NUM_WOW_PATTERNS 22
/* FW supports max 50 outstanding mgmt cmds */
#define TARGET_TLV_MGMT_NUM_MSDU_DESC (50)
/* Target specific defines for WMI-HL-1.0 firmware */
#define TARGET_HL_10_TLV_NUM_PEERS 14

drivers/net/wireless/ath/ath10k/wmi-ops.h

@@ -29,6 +29,8 @@ struct wmi_ops {
struct wmi_scan_ev_arg *arg);
int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg);
int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_ch_info_ev_arg *arg);
int (*pull_peer_delete_resp)(struct ath10k *ar, struct sk_buff *skb,
@@ -244,6 +246,16 @@ ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
return ar->wmi.ops->pull_scan(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
{
if (!ar->wmi.ops->pull_mgmt_tx_compl)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg)

drivers/net/wireless/ath/ath10k/wmi-tlv.c

@@ -558,6 +558,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_TLV_PEER_DELETE_RESP_EVENTID:
ath10k_wmi_tlv_event_peer_delete_resp(ar, skb);
break;
case WMI_TLV_MGMT_TX_COMPLETION_EVENTID:
ath10k_wmi_tlv_event_mgmt_tx_compl(ar, skb);
break;
default:
ath10k_dbg(ar, ATH10K_DBG_WMI, "Unknown eventid: %d\n", id);
break;
@@ -599,6 +602,31 @@ static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
return 0;
}
static int ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(
struct ath10k *ar, struct sk_buff *skb,
struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
{
const void **tb;
const struct wmi_tlv_mgmt_tx_compl_ev *ev;
int ret;
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL];
arg->desc_id = ev->desc_id;
arg->status = ev->status;
arg->pdev_id = ev->pdev_id;
kfree(tb);
return 0;
}
static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg)
@@ -2488,6 +2516,30 @@ ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
return skb;
}
static int
ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
dma_addr_t paddr)
{
struct ath10k_wmi *wmi = &ar->wmi;
struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
int ret;
pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC);
if (!pkt_addr)
return -ENOMEM;
pkt_addr->vaddr = skb;
pkt_addr->paddr = paddr;
spin_lock_bh(&wmi->mgmt_tx_lock);
ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0,
wmi->mgmt_max_num_pending_tx, GFP_ATOMIC);
spin_unlock_bh(&wmi->mgmt_tx_lock);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id %d\n", ret);
return ret;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
{
@@ -2520,9 +2572,9 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
u32 buf_len = msdu->len;
struct wmi_tlv *tlv;
struct sk_buff *skb;
+ int desc_id, len;
u32 vdev_id;
void *ptr;
- int len;
u16 fc;
hdr = (struct ieee80211_hdr *)msdu->data;
@@ -2554,13 +2606,17 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
if (!skb)
return ERR_PTR(-ENOMEM);
desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr);
if (desc_id < 0)
goto msdu_id_alloc_fail;
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
tlv->len = __cpu_to_le16(sizeof(cmd->hdr));
cmd = (void *)tlv->value;
cmd->hdr.vdev_id = vdev_id;
- cmd->hdr.desc_id = 0;
+ cmd->hdr.desc_id = desc_id;
cmd->hdr.chanfreq = 0;
cmd->hdr.buf_len = __cpu_to_le32(buf_len);
cmd->hdr.frame_len = __cpu_to_le32(msdu->len);
@@ -2577,6 +2633,10 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
memcpy(cmd->buf, msdu->data, buf_len);
return skb;
msdu_id_alloc_fail:
dev_kfree_skb(skb);
return ERR_PTR(desc_id);
}
static struct sk_buff *
@@ -3750,6 +3810,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
.pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev,
.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
.pull_peer_delete_resp = ath10k_wmi_tlv_op_pull_peer_delete_ev,
.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
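Note that ath10k_wmi_mgmt_tx_alloc_msdu_id() above returns the raw idr_alloc() result, so a negative value is an errno and a non-negative value is the descriptor id that firmware later echoes back in the completion event. The completion side of the earlier sketch, again with illustrative names:

/* Illustrative: resolve the desc_id carried in the completion event
 * back to the pending skb, then drop the tracking entry under the
 * same lock that guarded allocation.
 */
static struct sk_buff *mgmt_tx_resolve(struct ath10k_wmi *wmi, u32 desc_id)
{
	struct pending_mgmt *p;
	struct sk_buff *msdu = NULL;

	spin_lock_bh(&wmi->mgmt_tx_lock);
	p = idr_find(&wmi->mgmt_pending_tx, desc_id);
	if (p) {
		msdu = p->vaddr;
		idr_remove(&wmi->mgmt_pending_tx, desc_id);
	}
	spin_unlock_bh(&wmi->mgmt_tx_lock);

	kfree(p);	/* kfree(NULL) is a no-op */
	return msdu;	/* caller unmaps it and reports tx status */
}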

drivers/net/wireless/ath/ath10k/wmi-tlv.h

@@ -322,6 +322,7 @@ enum wmi_tlv_event_id {
WMI_TLV_TBTTOFFSET_UPDATE_EVENTID,
WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID,
WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
WMI_TLV_MGMT_TX_COMPLETION_EVENTID,
WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG),
WMI_TLV_TX_ADDBA_COMPLETE_EVENTID,
WMI_TLV_BA_RSP_SSN_EVENTID,
@@ -898,6 +899,7 @@ enum wmi_tlv_tag {
WMI_TLV_TAG_STRUCT_HL_1_0_SVC_OFFSET = 176,
WMI_TLV_TAG_STRUCT_MGMT_TX_CMD = 0x1A6,
WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL,
WMI_TLV_TAG_STRUCT_PEER_DELETE_RESP_EVENT = 0x1C3,
WMI_TLV_TAG_MAX
@@ -1186,6 +1188,17 @@ struct wmi_tlv {
u8 value[0];
} __packed;
struct ath10k_mgmt_tx_pkt_addr {
void *vaddr;
dma_addr_t paddr;
};
struct wmi_tlv_mgmt_tx_compl_ev {
__le32 desc_id;
__le32 status;
__le32 pdev_id;
};
#define WMI_TLV_MGMT_RX_NUM_RSSI 4
struct wmi_tlv_mgmt_rx_ev {
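The three completion fields are little-endian on the wire (__le32); on a big-endian host a consumer would convert them to host order before comparisons or idr lookups. A minimal example, with a hypothetical helper name:

/* Example only: convert the little-endian completion fields to host
 * order (a no-op on little-endian hosts).
 */
static void mgmt_tx_compl_to_cpu(const struct wmi_tlv_mgmt_tx_compl_ev *ev,
				 u32 *desc_id, u32 *status)
{
	*desc_id = __le32_to_cpu(ev->desc_id);
	*status = __le32_to_cpu(ev->status);
}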

drivers/net/wireless/ath/ath10k/wmi.c

@@ -2287,6 +2287,59 @@ int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar,
return 0;
}
static int wmi_tlv_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
u32 status)
{
struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
struct ath10k_wmi *wmi = &ar->wmi;
struct ieee80211_tx_info *info;
struct sk_buff *msdu;
int ret = 0;
spin_lock_bh(&wmi->mgmt_tx_lock);
pkt_addr = idr_find(&wmi->mgmt_pending_tx, desc_id);
if (!pkt_addr) {
ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n",
desc_id);
ret = -ENOENT;
goto tx_comp_process_done;
}
msdu = pkt_addr->vaddr;
dma_unmap_single(ar->dev, pkt_addr->paddr,
msdu->len, DMA_FROM_DEVICE);
info = IEEE80211_SKB_CB(msdu);
if (!status)
info->flags |= IEEE80211_TX_STAT_ACK;
else
info->flags |= status;
ieee80211_tx_status_irqsafe(ar->hw, msdu);
ret = 0;
tx_comp_process_done:
idr_remove(&wmi->mgmt_pending_tx, desc_id);
spin_unlock_bh(&wmi->mgmt_tx_lock);
return ret;
}
int ath10k_wmi_tlv_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
{
int ret;
struct wmi_tlv_mgmt_tx_compl_ev_arg arg;
ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
if (ret) {
ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret);
return ret;
}
wmi_tlv_process_mgmt_tx_comp(ar, arg.desc_id, arg.status);
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TLV_MGMT_TX_COMPLETION_EVENTID\n");
return 0;
}
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_mgmt_rx_ev_arg arg = {};
@@ -8307,6 +8360,11 @@ int ath10k_wmi_attach(struct ath10k *ar)
INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
if (QCA_REV_WCN3990(ar)) {
spin_lock_init(&ar->wmi.mgmt_tx_lock);
idr_init(&ar->wmi.mgmt_pending_tx);
}
return 0;
}
@@ -8326,8 +8384,32 @@ void ath10k_wmi_free_host_mem(struct ath10k *ar)
ar->wmi.num_mem_chunks = 0;
}
static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
void *ctx)
{
struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr;
struct ath10k *ar = ctx;
struct sk_buff *msdu;
ath10k_dbg(ar, ATH10K_DBG_WMI,
"force cleanup mgmt msdu_id %hu\n", msdu_id);
msdu = pkt_addr->vaddr;
dma_unmap_single(ar->dev, pkt_addr->paddr,
msdu->len, DMA_FROM_DEVICE);
ieee80211_free_txskb(ar->hw, msdu);
return 0;
}
void ath10k_wmi_detach(struct ath10k *ar)
{
if (QCA_REV_WCN3990(ar)) {
idr_for_each(&ar->wmi.mgmt_pending_tx,
ath10k_wmi_mgmt_tx_clean_up_pending, ar);
idr_destroy(&ar->wmi.mgmt_pending_tx);
}
cancel_work_sync(&ar->svc_rdy_work);
if (ar->svc_rdy_skb)
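The detach path above follows the standard idr drain-and-destroy pattern: idr_for_each() visits every live (id, pointer) pair, and idr_destroy() afterwards frees only the idr's internal bookkeeping, never the stored pointers, so entries must be released in the callback. A generic sketch of the pattern (not the patch's exact callback):

/* Generic sketch: free every tracked entry, then tear down the idr.
 * The callback returns 0 to continue the walk; a non-zero return
 * would abort it.
 */
static int drop_entry(int id, void *ptr, void *ctx)
{
	kfree(ptr);	/* here: a struct ath10k_mgmt_tx_pkt_addr */
	return 0;
}

idr_for_each(&ar->wmi.mgmt_pending_tx, drop_entry, ar);
idr_destroy(&ar->wmi.mgmt_pending_tx);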

drivers/net/wireless/ath/ath10k/wmi.h

@@ -6308,6 +6308,12 @@ struct wmi_peer_delete_resp_ev_arg {
struct wmi_mac_addr peer_addr;
};
struct wmi_tlv_mgmt_tx_compl_ev_arg {
__le32 desc_id;
__le32 status;
__le32 pdev_id;
};
struct wmi_mgmt_rx_ev_arg {
__le32 channel;
__le32 snr;
@@ -6682,6 +6688,7 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar,
struct sk_buff *skb);
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_tlv_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb);