mmc: queue: initialization of command queue

The Command Queueing (CQ) feature was introduced in revision 5.1 of
the eMMC standard. CQ adds new commands for issuing tasks to the
device, for ordering the execution of previously issued tasks, and
for additional task management functions.

The idea is to keep the legacy and CQ code paths as separate as
possible. Hence, a dedicated request queue is created for CQ. The
issuing path is non-blocking, since several requests (up to 32) can
be queued at a time.
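
As an illustration, a minimal sketch of how a host controller driver
opts in to this path. This is not part of the change itself: only the
MMC_CAP2_CMD_QUEUE flag and the queue plumbing come from this patch;
the probe function and driver are hypothetical.

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/mmc/host.h>

    /* Hypothetical host driver probe; only the capability flag is new API. */
    static int my_cmdq_host_probe(struct platform_device *pdev)
    {
            struct mmc_host *mmc;
            int ret;

            mmc = mmc_alloc_host(0, &pdev->dev);
            if (!mmc)
                    return -ENOMEM;

            /*
             * Advertise command queueing. For a card whose ext_csd reports
             * cmdq_support, mmc_init_queue() will then create the
             * non-blocking cmdq queue for the main data area and spawn the
             * "mmc-cmdqd" dispatch thread instead of mmc_queue_thread.
             */
            mmc->caps2 |= MMC_CAP2_CMD_QUEUE;

            ret = mmc_add_host(mmc);
            if (ret)
                    mmc_free_host(mmc);
            return ret;
    }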

Change-Id: I5b48d1b3ed17585b907ec70ff7c8d583003ec9e1
Signed-off-by: Asutosh Das <asutoshd@codeaurora.org>
Signed-off-by: Venkat Gopalakrishnan <venkatg@codeaurora.org>
[subhashj@codeaurora.org: fixed trivial merge conflicts & compilation
error]
Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
Author:    Asutosh Das <asutoshd@codeaurora.org>
Date:      2015-05-21 13:29:51 +05:30
Committer: Subhash Jadavani <subhashj@codeaurora.org>
commit 6d1be7e42b, parent 534f1ea89d
4 changed files with 219 additions and 4 deletions

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c

@@ -2800,7 +2800,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
         INIT_LIST_HEAD(&md->part);
         md->usage = 1;
 
-        ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
+        ret = mmc_init_queue(&md->queue, card, &md->lock, subname, area_type);
         if (ret)
                 goto err_putdisk;

diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c

@@ -55,6 +55,72 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
         return BLKPREP_OK;
 }
 
+static inline bool mmc_cmdq_should_pull_reqs(struct mmc_host *host,
+                                        struct mmc_cmdq_context_info *ctx)
+{
+        if (test_bit(CMDQ_STATE_ERR, &ctx->curr_state)) {
+                pr_debug("%s: %s: skip pulling reqs: state: %lu\n",
+                         mmc_hostname(host), __func__, ctx->curr_state);
+                return false;
+        } else {
+                return true;
+        }
+}
+
+static int mmc_cmdq_thread(void *d)
+{
+        struct mmc_queue *mq = d;
+        struct request_queue *q = mq->queue;
+        struct mmc_card *card = mq->card;
+        struct request *req;
+        struct mmc_host *host = card->host;
+        struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+        unsigned long flags;
+
+        current->flags |= PF_MEMALLOC;
+        if (card->host->wakeup_on_idle)
+                set_wake_up_idle(true);
+
+        while (1) {
+                int ret = 0;
+
+                if (!mmc_cmdq_should_pull_reqs(host, ctx)) {
+                        test_and_set_bit(0, &ctx->req_starved);
+                        schedule();
+                }
+
+                spin_lock_irqsave(q->queue_lock, flags);
+                req = blk_peek_request(q);
+                if (req) {
+                        ret = blk_queue_start_tag(q, req);
+                        spin_unlock_irqrestore(q->queue_lock, flags);
+                        if (ret) {
+                                test_and_set_bit(0, &ctx->req_starved);
+                                schedule();
+                        } else {
+                                ret = mq->cmdq_issue_fn(mq, req);
+                                if (ret) {
+                                        pr_err("%s: failed (%d) to issue req, requeue\n",
+                                               mmc_hostname(host), ret);
+                                        spin_lock_irqsave(q->queue_lock, flags);
+                                        blk_requeue_request(q, req);
+                                        spin_unlock_irqrestore(q->queue_lock,
+                                                               flags);
+                                }
+                        }
+                } else {
+                        spin_unlock_irqrestore(q->queue_lock, flags);
+                        if (kthread_should_stop()) {
+                                set_current_state(TASK_RUNNING);
+                                break;
+                        }
+                        schedule();
+                }
+        } /* loop */
+        return 0;
+}
+
 static int mmc_queue_thread(void *d)
 {
         struct mmc_queue *mq = d;
@@ -119,6 +185,13 @@ static int mmc_queue_thread(void *d)
         return 0;
 }
 
+static void mmc_cmdq_dispatch_req(struct request_queue *q)
+{
+        struct mmc_queue *mq = q->queuedata;
+
+        wake_up_process(mq->thread);
+}
+
 /*
  * Generic MMC request handler. This is called for any queue on a
  * particular host. When the host is not busy, we look for a request
@@ -193,6 +266,29 @@ static void mmc_queue_setup_discard(struct request_queue *q,
                 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
 }
 
+/**
+ * mmc_cmdq_setup_queue
+ * @mq: mmc queue
+ * @card: card to attach to this queue
+ *
+ * Set up the request queue for a CMDQ-supporting MMC card.
+ */
+void mmc_cmdq_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+{
+        u64 limit = BLK_BOUNCE_HIGH;
+        struct mmc_host *host = card->host;
+
+        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+        if (mmc_can_erase(card))
+                mmc_queue_setup_discard(mq->queue, card);
+
+        blk_queue_bounce_limit(mq->queue, limit);
+        blk_queue_max_hw_sectors(mq->queue, min(host->max_blk_count,
+                                                host->max_req_size / 512));
+        blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+        blk_queue_max_segments(mq->queue, host->max_segs);
+}
+
 /**
  * mmc_init_queue - initialise a queue structure.
  * @mq: mmc queue
@@ -203,7 +299,7 @@ static void mmc_queue_setup_discard(struct request_queue *q,
  * Initialise a MMC card request queue.
  */
 int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
-                   spinlock_t *lock, const char *subname)
+                   spinlock_t *lock, const char *subname, int area_type)
 {
         struct mmc_host *host = card->host;
         u64 limit = BLK_BOUNCE_HIGH;
@@ -215,6 +311,28 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                 limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
         mq->card = card;
+        if (card->ext_csd.cmdq_support &&
+            (area_type == MMC_BLK_DATA_AREA_MAIN)) {
+                mq->queue = blk_init_queue(mmc_cmdq_dispatch_req, lock);
+                if (!mq->queue)
+                        return -ENOMEM;
+                mmc_cmdq_setup_queue(mq, card);
+                ret = mmc_cmdq_init(mq, card);
+                if (ret) {
+                        pr_err("%s: %d: cmdq: unable to set-up\n",
+                               mmc_hostname(card->host), ret);
+                        blk_cleanup_queue(mq->queue);
+                } else {
+                        mq->queue->queuedata = mq;
+                        mq->thread = kthread_run(mmc_cmdq_thread, mq,
+                                                 "mmc-cmdqd/%d%s",
+                                                 host->index,
+                                                 subname ? subname : "");
+                        return ret;
+                }
+        }
+
         mq->queue = blk_init_queue(mmc_request_fn, lock);
         if (!mq->queue)
                 return -ENOMEM;
@@ -443,6 +561,77 @@ void mmc_packed_clean(struct mmc_queue *mq)
         mqrq_prev->packed = NULL;
 }
 
+static void mmc_cmdq_softirq_done(struct request *rq)
+{
+        struct mmc_queue *mq = rq->q->queuedata;
+
+        mq->cmdq_complete_fn(rq);
+}
+
+int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card)
+{
+        int i, ret = 0;
+        /* one slot is reserved for dcmd requests */
+        int q_depth = card->ext_csd.cmdq_depth - 1;
+
+        if (!(card->host->caps2 & MMC_CAP2_CMD_QUEUE)) {
+                ret = -ENOTSUPP;
+                goto out;
+        }
+
+        mq->mqrq_cmdq = kzalloc(
+                        sizeof(struct mmc_queue_req) * q_depth, GFP_KERNEL);
+        if (!mq->mqrq_cmdq) {
+                pr_warn("%s: unable to allocate mqrq's for q_depth %d\n",
+                        mmc_card_name(card), q_depth);
+                ret = -ENOMEM;
+                goto out;
+        }
+
+        /* sg is allocated for data request slots only */
+        for (i = 0; i < q_depth; i++) {
+                mq->mqrq_cmdq[i].sg = mmc_alloc_sg(card->host->max_segs, &ret);
+                if (ret) {
+                        pr_warn("%s: unable to allocate cmdq sg of size %d\n",
+                                mmc_card_name(card),
+                                card->host->max_segs);
+                        goto free_mqrq_sg;
+                }
+        }
+
+        ret = blk_queue_init_tags(mq->queue, q_depth, NULL, BLK_TAG_ALLOC_FIFO);
+        if (ret) {
+                pr_warn("%s: unable to allocate cmdq tags %d\n",
+                        mmc_card_name(card), q_depth);
+                goto free_mqrq_sg;
+        }
+
+        blk_queue_softirq_done(mq->queue, mmc_cmdq_softirq_done);
+        goto out;
+
+free_mqrq_sg:
+        for (i = 0; i < q_depth; i++)
+                kfree(mq->mqrq_cmdq[i].sg);
+        kfree(mq->mqrq_cmdq);
+        mq->mqrq_cmdq = NULL;
+out:
+        return ret;
+}
+
+void mmc_cmdq_clean(struct mmc_queue *mq, struct mmc_card *card)
+{
+        int i;
+        int q_depth = card->ext_csd.cmdq_depth - 1;
+
+        blk_free_tags(mq->queue->queue_tags);
+        mq->queue->queue_tags = NULL;
+        blk_queue_free_tags(mq->queue);
+        for (i = 0; i < q_depth; i++)
+                kfree(mq->mqrq_cmdq[i].sg);
+        kfree(mq->mqrq_cmdq);
+        mq->mqrq_cmdq = NULL;
+}
+
 /**
  * mmc_queue_suspend - suspend a MMC request queue
  * @mq: MMC queue to suspend

diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h

@@ -52,12 +52,16 @@ struct mmc_queue {
 #define MMC_QUEUE_SUSPENDED     0
 #define MMC_QUEUE_NEW_REQUEST   1
 
         int                     (*issue_fn)(struct mmc_queue *, struct request *);
+        int                     (*cmdq_issue_fn)(struct mmc_queue *,
+                                                 struct request *);
+        void                    (*cmdq_complete_fn)(struct request *);
         void                    *data;
         struct request_queue    *queue;
         struct mmc_queue_req    mqrq[2];
         struct mmc_queue_req    *mqrq_cur;
         struct mmc_queue_req    *mqrq_prev;
+        struct mmc_queue_req    *mqrq_cmdq;
         bool                    wr_packing_enabled;
         int                     num_of_potential_packed_wr_reqs;
         int                     num_wr_reqs_to_start_packing;
@@ -67,7 +71,7 @@ struct mmc_queue {
 };
 
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
-                          const char *);
+                          const char *, int);
 extern void mmc_cleanup_queue(struct mmc_queue *);
 extern int mmc_queue_suspend(struct mmc_queue *, int);
 extern void mmc_queue_resume(struct mmc_queue *);
@@ -84,4 +88,7 @@ extern int mmc_access_rpmb(struct mmc_queue *);
 extern void print_mmc_packing_stats(struct mmc_card *card);
 
+extern int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card);
+extern void mmc_cmdq_clean(struct mmc_queue *mq, struct mmc_card *card);
+
 #endif

diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h

@@ -195,6 +195,23 @@ struct mmc_slot {
         void *handler_priv;
 };
 
+/**
+ * mmc_cmdq_context_info - describes the contexts of cmdq
+ * @active_reqs    requests being processed
+ * @curr_state     state of cmdq engine
+ * @req_starved    completion should invoke the request_fn since
+ *                 no tags were available
+ */
+struct mmc_cmdq_context_info {
+        unsigned long   active_reqs; /* in-flight requests */
+        unsigned long   curr_state;
+#define CMDQ_STATE_ERR 0
+        /* no free tag available */
+        unsigned long   req_starved;
+};
+
 /**
  * mmc_context_info - synchronization details for mmc context
  * @is_done_rcv         wake up reason was done request
@@ -370,6 +387,7 @@ struct mmc_host {
 /* Some hosts need additional tuning */
 #define MMC_CAP2_HS400_POST_TUNING      (1 << 22)
 #define MMC_CAP2_NONHOTPLUG     (1 << 25)       /*Don't support hotplug*/
+#define MMC_CAP2_CMD_QUEUE      (1 << 26)       /* support eMMC command queue */
 
         mmc_pm_flag_t           pm_caps;        /* supported pm features */
 
@@ -492,6 +510,7 @@ struct mmc_host {
 #endif
         enum dev_state dev_status;
         bool wakeup_on_idle;
+        struct mmc_cmdq_context_info    cmdq_ctx;
 
         unsigned long           private[0] ____cacheline_aligned;
 };
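
For context, a sketch of how the completion side is expected to
consume req_starved once a tag frees up. This is an assumption about
the intended flow rather than code from this patch; the consumer of
req_starved lands in later changes, and my_cmdq_post_complete is a
hypothetical helper.

    #include <linux/blkdev.h>
    #include <linux/mmc/host.h>

    static void my_cmdq_post_complete(struct mmc_host *host, struct request *rq)
    {
            struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
            struct request_queue *q = rq->q;

            /*
             * End the request; for a tagged request the legacy block
             * layer also returns the tag to the pool here.
             */
            blk_end_request_all(rq, 0);

            /*
             * If mmc_cmdq_thread failed blk_queue_start_tag() and went
             * to sleep, req_starved was set. Re-run the queue so the
             * request_fn (mmc_cmdq_dispatch_req) wakes the dispatch
             * thread now that a tag is available again.
             */
            if (test_and_clear_bit(0, &ctx->req_starved))
                    blk_run_queue(q);
    }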