zram: remove good and bad compress stats
Remove `good' and `bad' compressed sub-requests stats. RW request may cause a number of RW sub-requests. zram used to account `good' compressed sub-queries (with compressed size less than 50% of original size), `bad' compressed sub-queries (with compressed size greater than 75% of original size), leaving sub-requests with compression size between 50% and 75% of original size not accounted and not reported. zram already accounts each sub-request's compression size so we can calculate real device compression ratio. Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com> Acked-by: Minchan Kim <minchan@kernel.org> Acked-by: Jerome Marchand <jmarchan@redhat.com> Cc: Nitin Gupta <ngupta@vflare.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
be257c6130
commit
b7cccf8b40
2 changed files with 0 additions and 13 deletions
|
@ -293,7 +293,6 @@ static void zram_free_page(struct zram *zram, size_t index)
|
||||||
{
|
{
|
||||||
struct zram_meta *meta = zram->meta;
|
struct zram_meta *meta = zram->meta;
|
||||||
unsigned long handle = meta->table[index].handle;
|
unsigned long handle = meta->table[index].handle;
|
||||||
u16 size = meta->table[index].size;
|
|
||||||
|
|
||||||
if (unlikely(!handle)) {
|
if (unlikely(!handle)) {
|
||||||
/*
|
/*
|
||||||
|
@ -307,14 +306,8 @@ static void zram_free_page(struct zram *zram, size_t index)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (unlikely(size > max_zpage_size))
|
|
||||||
atomic_dec(&zram->stats.bad_compress);
|
|
||||||
|
|
||||||
zs_free(meta->mem_pool, handle);
|
zs_free(meta->mem_pool, handle);
|
||||||
|
|
||||||
if (size <= PAGE_SIZE / 2)
|
|
||||||
atomic_dec(&zram->stats.good_compress);
|
|
||||||
|
|
||||||
atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
|
atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
|
||||||
atomic_dec(&zram->stats.pages_stored);
|
atomic_dec(&zram->stats.pages_stored);
|
||||||
|
|
||||||
|
@ -478,7 +471,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (unlikely(clen > max_zpage_size)) {
|
if (unlikely(clen > max_zpage_size)) {
|
||||||
atomic_inc(&zram->stats.bad_compress);
|
|
||||||
clen = PAGE_SIZE;
|
clen = PAGE_SIZE;
|
||||||
src = NULL;
|
src = NULL;
|
||||||
if (is_partial_io(bvec))
|
if (is_partial_io(bvec))
|
||||||
|
@ -518,9 +510,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
|
||||||
/* Update stats */
|
/* Update stats */
|
||||||
atomic64_add(clen, &zram->stats.compr_size);
|
atomic64_add(clen, &zram->stats.compr_size);
|
||||||
atomic_inc(&zram->stats.pages_stored);
|
atomic_inc(&zram->stats.pages_stored);
|
||||||
if (clen <= PAGE_SIZE / 2)
|
|
||||||
atomic_inc(&zram->stats.good_compress);
|
|
||||||
|
|
||||||
out:
|
out:
|
||||||
if (locked)
|
if (locked)
|
||||||
mutex_unlock(&meta->buffer_lock);
|
mutex_unlock(&meta->buffer_lock);
|
||||||
|
|
|
@ -78,8 +78,6 @@ struct zram_stats {
|
||||||
atomic64_t notify_free; /* no. of swap slot free notifications */
|
atomic64_t notify_free; /* no. of swap slot free notifications */
|
||||||
atomic_t pages_zero; /* no. of zero filled pages */
|
atomic_t pages_zero; /* no. of zero filled pages */
|
||||||
atomic_t pages_stored; /* no. of pages currently stored */
|
atomic_t pages_stored; /* no. of pages currently stored */
|
||||||
atomic_t good_compress; /* % of pages with compression ratio<=50% */
|
|
||||||
atomic_t bad_compress; /* % of pages with compression ratio>=75% */
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct zram_meta {
|
struct zram_meta {
|
||||||
|
|
Loading…
Add table
Reference in a new issue