scsi: ufs: use "sector_range" in "ufs_long" unit-tests

Use the user-defined sector_range instead of the previously hard-coded
TEST_MAX_SECTOR_RANGE. If the user does not supply a sector_range, it
defaults to 512 MiB.
For the long sequential tests, sector_range defines the size of the
sequential I/O to submit. For the long random tests, sector_range
defines the address range in which random I/O is submitted, and the
total size of the random I/O is derived from it: sector_range divided
by LONG_RAND_TEST_REQ_RATIO.

Change-Id: Ifc7332e6def75c49448aadbebd35b7b9b3903447
Signed-off-by: Lee Susman <lsusman@codeaurora.org>
Signed-off-by: Gilad Broner <gbroner@codeaurora.org>
This commit is contained in:
Gilad Broner 2015-01-13 16:45:05 +02:00 committed by David Keitel
parent ee581331ca
commit 7f59f4e70a

View file

@ -31,7 +31,7 @@
#define UFS_TEST_BLK_DEV_TYPE_PREFIX "sd" #define UFS_TEST_BLK_DEV_TYPE_PREFIX "sd"
#define TEST_MAX_BIOS_PER_REQ 128 #define TEST_MAX_BIOS_PER_REQ 128
#define TEST_MAX_SECTOR_RANGE (10*1024*1024) /* 5GB */ #define TEST_DEFAULT_SECTOR_RANGE (1024*1024) /* 512MB */
#define LARGE_PRIME_1 1103515367 #define LARGE_PRIME_1 1103515367
#define LARGE_PRIME_2 35757 #define LARGE_PRIME_2 35757
#define MAGIC_SEED 7 #define MAGIC_SEED 7
@ -44,10 +44,11 @@
#define SECTOR_SIZE 512 #define SECTOR_SIZE 512
#define NUM_UNLUCKY_RETRIES 10 #define NUM_UNLUCKY_RETRIES 10
/* the number of requests that will be inserted */ /*
#define LONG_SEQ_TEST_NUM_REQS 256 * this defines the density of random requests in the address space, and
/* we issue 4KB requests, so 256 reqs = 1MB */ * it represents the ratio between accessed sectors and non-accessed sectors
#define LONG_RAND_TEST_NUM_REQS (256 * 64) */
#define LONG_RAND_TEST_REQ_RATIO 64
/* request queue limitation is 128 requests, and we leave 10 spare requests */ /* request queue limitation is 128 requests, and we leave 10 spare requests */
#define QUEUE_MAX_REQUESTS 118 #define QUEUE_MAX_REQUESTS 118
#define MB_MSEC_RATIO_APPROXIMATION ((1024 * 1024) / 1000) #define MB_MSEC_RATIO_APPROXIMATION ((1024 * 1024) / 1000)
@ -145,6 +146,11 @@ struct ufs_test_data {
atomic_t outstanding_threads; atomic_t outstanding_threads;
struct completion outstanding_complete; struct completion outstanding_complete;
/* user-defined size of address space in which to perform I/O */
u32 sector_range;
/* total number of requests to be submitted in long test */
u32 long_test_num_reqs;
struct test_iosched *test_iosched; struct test_iosched *test_iosched;
}; };
@ -277,20 +283,23 @@ static unsigned int ufs_test_pseudo_random_seed(unsigned int *seed_number,
* Note that for UFS sector number has to be aligned with block size. Since * Note that for UFS sector number has to be aligned with block size. Since
* scsi will send the block number as the LBA. * scsi will send the block number as the LBA.
*/ */
static void pseudo_rnd_sector_and_size(unsigned int *seed, static void pseudo_rnd_sector_and_size(struct ufs_test_data *utd,
unsigned int min_start_sector,
unsigned int *start_sector, unsigned int *start_sector,
unsigned int *num_of_bios) unsigned int *num_of_bios)
{ {
unsigned int max_sec = min_start_sector + TEST_MAX_SECTOR_RANGE; struct test_iosched *tios = utd->test_iosched;
u32 min_start_sector = tios->start_sector;
unsigned int max_sec = min_start_sector + utd->sector_range;
do { do {
*start_sector = ufs_test_pseudo_random_seed(seed, 1, max_sec); *start_sector = ufs_test_pseudo_random_seed(
*num_of_bios = ufs_test_pseudo_random_seed(seed, &utd->random_test_seed, 1, max_sec);
1, TEST_MAX_BIOS_PER_REQ); *num_of_bios = ufs_test_pseudo_random_seed(
&utd->random_test_seed, 1, TEST_MAX_BIOS_PER_REQ);
if (!(*num_of_bios)) if (!(*num_of_bios))
*num_of_bios = 1; *num_of_bios = 1;
} while ((*start_sector < min_start_sector) || } while ((*start_sector < min_start_sector) ||
(*start_sector + (*num_of_bios * BIO_U32_SIZE * 4)) > max_sec); (*start_sector + (*num_of_bios * TEST_BIO_SIZE)) > max_sec);
/* /*
* The test-iosched API is working with sectors 512b, while UFS LBA * The test-iosched API is working with sectors 512b, while UFS LBA
* is in blocks (4096). Thus the last 3 bits has to be cleared. * is in blocks (4096). Thus the last 3 bits has to be cleared.
@ -699,24 +708,26 @@ static bool ufs_test_multi_thread_completion(struct test_iosched *test_iosched)
static bool long_rand_test_check_completion(struct test_iosched *test_iosched) static bool long_rand_test_check_completion(struct test_iosched *test_iosched)
{ {
struct ufs_test_data *utd = test_iosched->blk_dev_test_data; struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
if (utd->completed_req_count > LONG_RAND_TEST_NUM_REQS) {
if (utd->completed_req_count > utd->long_test_num_reqs) {
pr_err("%s: Error: Completed more requests than total test requests.\nTerminating test." pr_err("%s: Error: Completed more requests than total test requests.\nTerminating test."
, __func__); , __func__);
return true; return true;
} }
return (utd->completed_req_count == LONG_RAND_TEST_NUM_REQS); return utd->completed_req_count == utd->long_test_num_reqs;
} }
static bool long_seq_test_check_completion(struct test_iosched *test_iosched) static bool long_seq_test_check_completion(struct test_iosched *test_iosched)
{ {
struct ufs_test_data *utd = test_iosched->blk_dev_test_data; struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
if (utd->completed_req_count > LONG_SEQ_TEST_NUM_REQS) {
if (utd->completed_req_count > utd->long_test_num_reqs) {
pr_err("%s: Error: Completed more requests than total test requests" pr_err("%s: Error: Completed more requests than total test requests"
, __func__); , __func__);
pr_err("%s: Terminating test.", __func__); pr_err("%s: Terminating test.", __func__);
return true; return true;
} }
return (utd->completed_req_count == LONG_SEQ_TEST_NUM_REQS); return utd->completed_req_count == utd->long_test_num_reqs;
} }
/** /**
@ -760,9 +771,7 @@ static void ufs_test_run_scenario(void *data, async_cookie_t cookie)
/* use randomly generated requests */ /* use randomly generated requests */
if (ts->rnd_req && utd->random_test_seed != 0) if (ts->rnd_req && utd->random_test_seed != 0)
pseudo_rnd_sector_and_size(&utd->random_test_seed, pseudo_rnd_sector_and_size(utd, &start_sec, &num_bios);
ts->test_iosched->start_sector, &start_sec,
&num_bios);
ret = test_iosched_add_wr_rd_test_req(test_iosched, 0, ret = test_iosched_add_wr_rd_test_req(test_iosched, 0,
direction, start_sec, num_bios, TEST_PATTERN_5A, direction, start_sec, num_bios, TEST_PATTERN_5A,
@ -996,13 +1005,18 @@ static void long_test_free_end_io_fn(struct request *rq, int err)
static int run_long_test(struct test_iosched *test_iosched) static int run_long_test(struct test_iosched *test_iosched)
{ {
int ret = 0; int ret = 0;
int direction, long_test_num_requests, num_bios_per_request; int direction, num_bios_per_request;
static unsigned int inserted_requests; static unsigned int inserted_requests;
u32 sector, seed, num_bios, seq_sector_delta; u32 sector, seed, num_bios, seq_sector_delta;
struct ufs_test_data *utd = test_iosched->blk_dev_test_data; struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
BUG_ON(!test_iosched); BUG_ON(!test_iosched);
sector = test_iosched->start_sector; sector = test_iosched->start_sector;
if (test_iosched->sector_range)
utd->sector_range = test_iosched->sector_range;
else
utd->sector_range = TEST_DEFAULT_SECTOR_RANGE;
if (utd->test_stage != UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2) { if (utd->test_stage != UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2) {
test_iosched->test_count = 0; test_iosched->test_count = 0;
utd->completed_req_count = 0; utd->completed_req_count = 0;
@ -1013,23 +1027,29 @@ static int run_long_test(struct test_iosched *test_iosched)
switch (test_iosched->test_info.testcase) { switch (test_iosched->test_info.testcase) {
case UFS_TEST_LONG_RANDOM_READ: case UFS_TEST_LONG_RANDOM_READ:
num_bios_per_request = 1; num_bios_per_request = 1;
long_test_num_requests = LONG_RAND_TEST_NUM_REQS; utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) /
(LONG_RAND_TEST_REQ_RATIO * TEST_BIO_SIZE *
num_bios_per_request);
direction = READ; direction = READ;
break; break;
case UFS_TEST_LONG_RANDOM_WRITE: case UFS_TEST_LONG_RANDOM_WRITE:
num_bios_per_request = 1; num_bios_per_request = 1;
long_test_num_requests = LONG_RAND_TEST_NUM_REQS; utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) /
(LONG_RAND_TEST_REQ_RATIO * TEST_BIO_SIZE *
num_bios_per_request);
direction = WRITE; direction = WRITE;
break; break;
case UFS_TEST_LONG_SEQUENTIAL_READ: case UFS_TEST_LONG_SEQUENTIAL_READ:
num_bios_per_request = TEST_MAX_BIOS_PER_REQ; num_bios_per_request = TEST_MAX_BIOS_PER_REQ;
long_test_num_requests = LONG_SEQ_TEST_NUM_REQS; utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) /
(num_bios_per_request * TEST_BIO_SIZE);
direction = READ; direction = READ;
break; break;
case UFS_TEST_LONG_SEQUENTIAL_WRITE: case UFS_TEST_LONG_SEQUENTIAL_WRITE:
num_bios_per_request = TEST_MAX_BIOS_PER_REQ;
long_test_num_requests = LONG_SEQ_TEST_NUM_REQS;
case UFS_TEST_LONG_SEQUENTIAL_MIXED: case UFS_TEST_LONG_SEQUENTIAL_MIXED:
num_bios_per_request = TEST_MAX_BIOS_PER_REQ;
utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) /
(num_bios_per_request * TEST_BIO_SIZE);
default: default:
direction = WRITE; direction = WRITE;
} }
@ -1039,7 +1059,7 @@ static int run_long_test(struct test_iosched *test_iosched)
seed = utd->random_test_seed ? utd->random_test_seed : MAGIC_SEED; seed = utd->random_test_seed ? utd->random_test_seed : MAGIC_SEED;
pr_info("%s: Adding %d requests, first req_id=%d", __func__, pr_info("%s: Adding %d requests, first req_id=%d", __func__,
long_test_num_requests, test_iosched->wr_rd_next_req_id); utd->long_test_num_reqs, test_iosched->wr_rd_next_req_id);
do { do {
/* /*
@ -1064,8 +1084,7 @@ static int run_long_test(struct test_iosched *test_iosched)
break; break;
case UFS_TEST_LONG_RANDOM_READ: case UFS_TEST_LONG_RANDOM_READ:
case UFS_TEST_LONG_RANDOM_WRITE: case UFS_TEST_LONG_RANDOM_WRITE:
pseudo_rnd_sector_and_size(&seed, pseudo_rnd_sector_and_size(utd, &sector, &num_bios);
test_iosched->start_sector, &sector, &num_bios);
default: default:
break; break;
} }
@ -1090,10 +1109,10 @@ static int run_long_test(struct test_iosched *test_iosched)
inserted_requests++; inserted_requests++;
} }
} while (inserted_requests < long_test_num_requests); } while (inserted_requests < utd->long_test_num_reqs);
/* in this case the queue will not run in the above loop */ /* in this case the queue will not run in the above loop */
if (long_test_num_requests < QUEUE_MAX_REQUESTS) if (utd->long_test_num_reqs < QUEUE_MAX_REQUESTS)
blk_post_runtime_resume(test_iosched->req_q, 0); blk_post_runtime_resume(test_iosched->req_q, 0);
return ret; return ret;
@ -1212,9 +1231,7 @@ static int ufs_test_run_data_integrity_test(struct test_iosched *test_iosched)
for (i = 0; i < QUEUE_MAX_REQUESTS; i++) { for (i = 0; i < QUEUE_MAX_REQUESTS; i++) {
/* make sure that we didn't draw the same start_sector twice */ /* make sure that we didn't draw the same start_sector twice */
while (retries--) { while (retries--) {
pseudo_rnd_sector_and_size(&utd->random_test_seed, pseudo_rnd_sector_and_size(utd, &start_sec, &num_bios);
test_iosched->start_sector, &start_sec,
&num_bios);
sectors[i] = start_sec; sectors[i] = start_sec;
for (j = 0; (j < i) && (sectors[i] != sectors[j]); j++) for (j = 0; (j < i) && (sectors[i] != sectors[j]); j++)
/* just increment j */; /* just increment j */;