iwlcore: Allow skb allocation from tasklet.
If the RX queue becomes empty, we need to restock the queue from the tasklet to prevent the uCode from starving. The caller of iwl_rx_allocate decides whether the allocated buffer should come from GFP_ATOMIC or GFP_KERNEL. Signed-off-by: Mohamed Abbas <mohamed.abbas@intel.com> Signed-off-by: Reinette Chatre <reinette.chatre@intel.com> Signed-off-by: John W. Linville <linville@tuxdriver.com>
This commit is contained in:
parent
ef850d7cb3
commit
4752c93c30
4 changed files with 34 additions and 10 deletions
drivers/net/wireless/iwlwifi
|
@ -831,6 +831,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
u8 fill_rx = 0;
|
u8 fill_rx = 0;
|
||||||
u32 count = 8;
|
u32 count = 8;
|
||||||
|
int total_empty;
|
||||||
|
|
||||||
/* uCode's read index (stored in shared DRAM) indicates the last Rx
|
/* uCode's read index (stored in shared DRAM) indicates the last Rx
|
||||||
* buffer that the driver may process (last buffer filled by ucode). */
|
* buffer that the driver may process (last buffer filled by ucode). */
|
||||||
|
@ -841,7 +842,12 @@ void iwl_rx_handle(struct iwl_priv *priv)
|
||||||
if (i == r)
|
if (i == r)
|
||||||
IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
|
IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
|
||||||
|
|
||||||
if (iwl_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
|
/* calculate total frames need to be restock after handling RX */
|
||||||
|
total_empty = r - priv->rxq.write_actual;
|
||||||
|
if (total_empty < 0)
|
||||||
|
total_empty += RX_QUEUE_SIZE;
|
||||||
|
|
||||||
|
if (total_empty > (RX_QUEUE_SIZE / 2))
|
||||||
fill_rx = 1;
|
fill_rx = 1;
|
||||||
|
|
||||||
while (i != r) {
|
while (i != r) {
|
||||||
|
@ -918,7 +924,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
|
||||||
count++;
|
count++;
|
||||||
if (count >= 8) {
|
if (count >= 8) {
|
||||||
priv->rxq.read = i;
|
priv->rxq.read = i;
|
||||||
iwl_rx_queue_restock(priv);
|
iwl_rx_replenish_now(priv);
|
||||||
count = 0;
|
count = 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -926,7 +932,10 @@ void iwl_rx_handle(struct iwl_priv *priv)
|
||||||
|
|
||||||
/* Backtrack one entry */
|
/* Backtrack one entry */
|
||||||
priv->rxq.read = i;
|
priv->rxq.read = i;
|
||||||
iwl_rx_queue_restock(priv);
|
if (fill_rx)
|
||||||
|
iwl_rx_replenish_now(priv);
|
||||||
|
else
|
||||||
|
iwl_rx_queue_restock(priv);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* call this function to flush any scheduled tasklet */
|
/* call this function to flush any scheduled tasklet */
|
||||||
|
|
|
@ -318,10 +318,11 @@ int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
|
||||||
struct iwl_rx_queue *q);
|
struct iwl_rx_queue *q);
|
||||||
void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
|
void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
|
||||||
void iwl_rx_replenish(struct iwl_priv *priv);
|
void iwl_rx_replenish(struct iwl_priv *priv);
|
||||||
|
void iwl_rx_replenish_now(struct iwl_priv *priv);
|
||||||
int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
|
int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
|
||||||
int iwl_rx_queue_restock(struct iwl_priv *priv);
|
int iwl_rx_queue_restock(struct iwl_priv *priv);
|
||||||
int iwl_rx_queue_space(const struct iwl_rx_queue *q);
|
int iwl_rx_queue_space(const struct iwl_rx_queue *q);
|
||||||
void iwl_rx_allocate(struct iwl_priv *priv);
|
void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority);
|
||||||
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
|
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
|
||||||
int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
|
int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
|
||||||
/* Handlers */
|
/* Handlers */
|
||||||
|
|
|
@ -382,6 +382,7 @@ struct iwl_rx_queue {
|
||||||
u32 read;
|
u32 read;
|
||||||
u32 write;
|
u32 write;
|
||||||
u32 free_count;
|
u32 free_count;
|
||||||
|
u32 write_actual;
|
||||||
struct list_head rx_free;
|
struct list_head rx_free;
|
||||||
struct list_head rx_used;
|
struct list_head rx_used;
|
||||||
int need_update;
|
int need_update;
|
||||||
|
|
|
@ -145,12 +145,14 @@ int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
|
||||||
goto exit_unlock;
|
goto exit_unlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write & ~0x7);
|
q->write_actual = (q->write & ~0x7);
|
||||||
|
iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);
|
||||||
|
|
||||||
/* Else device is assumed to be awake */
|
/* Else device is assumed to be awake */
|
||||||
} else {
|
} else {
|
||||||
/* Device expects a multiple of 8 */
|
/* Device expects a multiple of 8 */
|
||||||
iwl_write32(priv, rx_wrt_ptr_reg, q->write & ~0x7);
|
q->write_actual = (q->write & ~0x7);
|
||||||
|
iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);
|
||||||
}
|
}
|
||||||
|
|
||||||
q->need_update = 0;
|
q->need_update = 0;
|
||||||
|
@ -212,7 +214,7 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
|
||||||
|
|
||||||
/* If we've added more space for the firmware to place data, tell it.
|
/* If we've added more space for the firmware to place data, tell it.
|
||||||
* Increment device's write pointer in multiples of 8. */
|
* Increment device's write pointer in multiples of 8. */
|
||||||
if (write != (rxq->write & ~0x7)) {
|
if (rxq->write_actual != (rxq->write & ~0x7)) {
|
||||||
spin_lock_irqsave(&rxq->lock, flags);
|
spin_lock_irqsave(&rxq->lock, flags);
|
||||||
rxq->need_update = 1;
|
rxq->need_update = 1;
|
||||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||||
|
@ -232,7 +234,7 @@ EXPORT_SYMBOL(iwl_rx_queue_restock);
|
||||||
* Also restock the Rx queue via iwl_rx_queue_restock.
|
* Also restock the Rx queue via iwl_rx_queue_restock.
|
||||||
* This is called as a scheduled work item (except for during initialization)
|
* This is called as a scheduled work item (except for during initialization)
|
||||||
*/
|
*/
|
||||||
void iwl_rx_allocate(struct iwl_priv *priv)
|
void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
|
||||||
{
|
{
|
||||||
struct iwl_rx_queue *rxq = &priv->rxq;
|
struct iwl_rx_queue *rxq = &priv->rxq;
|
||||||
struct list_head *element;
|
struct list_head *element;
|
||||||
|
@ -254,7 +256,8 @@ void iwl_rx_allocate(struct iwl_priv *priv)
|
||||||
|
|
||||||
/* Alloc a new receive buffer */
|
/* Alloc a new receive buffer */
|
||||||
rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
|
rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
|
||||||
GFP_KERNEL);
|
priority);
|
||||||
|
|
||||||
if (!rxb->skb) {
|
if (!rxb->skb) {
|
||||||
IWL_CRIT(priv, "Can not allocate SKB buffers\n");
|
IWL_CRIT(priv, "Can not allocate SKB buffers\n");
|
||||||
/* We don't reschedule replenish work here -- we will
|
/* We don't reschedule replenish work here -- we will
|
||||||
|
@ -289,7 +292,7 @@ void iwl_rx_replenish(struct iwl_priv *priv)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
iwl_rx_allocate(priv);
|
iwl_rx_allocate(priv, GFP_KERNEL);
|
||||||
|
|
||||||
spin_lock_irqsave(&priv->lock, flags);
|
spin_lock_irqsave(&priv->lock, flags);
|
||||||
iwl_rx_queue_restock(priv);
|
iwl_rx_queue_restock(priv);
|
||||||
|
@ -297,6 +300,14 @@ void iwl_rx_replenish(struct iwl_priv *priv)
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(iwl_rx_replenish);
|
EXPORT_SYMBOL(iwl_rx_replenish);
|
||||||
|
|
||||||
|
void iwl_rx_replenish_now(struct iwl_priv *priv)
|
||||||
|
{
|
||||||
|
iwl_rx_allocate(priv, GFP_ATOMIC);
|
||||||
|
|
||||||
|
iwl_rx_queue_restock(priv);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(iwl_rx_replenish_now);
|
||||||
|
|
||||||
|
|
||||||
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
|
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
|
||||||
* If an SKB has been detached, the POOL needs to have its SKB set to NULL
|
* If an SKB has been detached, the POOL needs to have its SKB set to NULL
|
||||||
|
@ -352,6 +363,7 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
|
||||||
/* Set us so that we have processed and used all buffers, but have
|
/* Set us so that we have processed and used all buffers, but have
|
||||||
* not restocked the Rx queue with fresh buffers */
|
* not restocked the Rx queue with fresh buffers */
|
||||||
rxq->read = rxq->write = 0;
|
rxq->read = rxq->write = 0;
|
||||||
|
rxq->write_actual = 0;
|
||||||
rxq->free_count = 0;
|
rxq->free_count = 0;
|
||||||
rxq->need_update = 0;
|
rxq->need_update = 0;
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -390,6 +402,7 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
|
||||||
/* Set us so that we have processed and used all buffers, but have
|
/* Set us so that we have processed and used all buffers, but have
|
||||||
* not restocked the Rx queue with fresh buffers */
|
* not restocked the Rx queue with fresh buffers */
|
||||||
rxq->read = rxq->write = 0;
|
rxq->read = rxq->write = 0;
|
||||||
|
rxq->write_actual = 0;
|
||||||
rxq->free_count = 0;
|
rxq->free_count = 0;
|
||||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Reference in a new issue