Merge branch 'kmap_atomic' of git://github.com/congwang/linux

Pull kmap_atomic cleanup from Cong Wang.

It's been in -next for a long time, and it gets rid of the (no longer
used) second argument to k[un]map_atomic().

Fix up a few trivial conflicts in various drivers, and do an "evil
merge" to catch some new uses that have come in since Cong's tree.

* 'kmap_atomic' of git://github.com/congwang/linux: (59 commits)
  feature-removal-schedule.txt: schedule the deprecated form of kmap_atomic() for removal
  highmem: kill all __kmap_atomic() [swarren@nvidia.com: highmem: Fix ARM build break due to __kmap_atomic rename]
  drbd: remove the second argument of k[un]map_atomic()
  zcache: remove the second argument of k[un]map_atomic()
  gma500: remove the second argument of k[un]map_atomic()
  dm: remove the second argument of k[un]map_atomic()
  tomoyo: remove the second argument of k[un]map_atomic()
  sunrpc: remove the second argument of k[un]map_atomic()
  rds: remove the second argument of k[un]map_atomic()
  net: remove the second argument of k[un]map_atomic()
  mm: remove the second argument of k[un]map_atomic()
  lib: remove the second argument of k[un]map_atomic()
  power: remove the second argument of k[un]map_atomic()
  kdb: remove the second argument of k[un]map_atomic()
  udf: remove the second argument of k[un]map_atomic()
  ubifs: remove the second argument of k[un]map_atomic()
  squashfs: remove the second argument of k[un]map_atomic()
  reiserfs: remove the second argument of k[un]map_atomic()
  ocfs2: remove the second argument of k[un]map_atomic()
  ntfs: remove the second argument of k[un]map_atomic()
  ...
commit 9f3938346a
Author: Linus Torvalds
Date:   2012-03-21 09:40:26 -07:00

180 changed files with 921 additions and 969 deletions

View file

@@ -535,3 +535,11 @@ Why: This driver provides support for USB storage devices like "USB
 	  (CONFIG_USB_STORAGE) which only drawback is the additional SCSI
 	  stack.
 Who:	Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
+
+----------------------------
+
+What:	kmap_atomic(page, km_type)
+When:	3.5
+Why:	The old kmap_atomic() with two arguments is deprecated, we only
+	keep it for backward compatibility for few cycles and then drop it.
+Who:	Cong Wang <amwang@redhat.com>
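The conversion the series performs is mechanical: drop the km_type argument and let the per-CPU mapping stack pick the slot. A minimal before/after sketch (a hypothetical helper written for illustration, not code from the series):

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Illustrative only: copy a buffer into a (possibly highmem) page. */
	static void copy_into_page(struct page *page, const void *buf, size_t len)
	{
		void *addr = kmap_atomic(page);	/* was: kmap_atomic(page, KM_USER0) */

		memcpy(addr, buf, len);
		kunmap_atomic(addr);		/* was: kunmap_atomic(addr, KM_USER0) */
	}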

View file

@@ -57,7 +57,7 @@ static inline void *kmap_high_get(struct page *page)
 #ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page);
+extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(const void *ptr);

View file

@@ -44,11 +44,11 @@ void fa_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	fa_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -58,7 +58,7 @@ void fa_copy_user_highpage(struct page *to, struct page *from,
  */
 void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -77,7 +77,7 @@ void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns fa_user_fns __initdata = {
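One point worth keeping in mind while reading the copypage conversions in this and the following files (an observation about the new API, not part of the changelog): the stack-based kmap_atomic() requires strictly nested mappings, so nested maps must be released in reverse (LIFO) order, as the converted code above already does:

	kto = kmap_atomic(to);		/* pushes one atomic mapping */
	kfrom = kmap_atomic(from);	/* pushes a second one */
	fa_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);		/* pop in reverse order */
	kunmap_atomic(kto);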

View file

@@ -72,17 +72,17 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	feroceon_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile ("\
 	mov	r1, %2				\n\
 	mov	r2, #0				\n\
@@ -102,7 +102,7 @@ void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns feroceon_user_fns __initdata = {

View file

@@ -42,11 +42,11 @@ void v3_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	v3_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -56,7 +56,7 @@ void v3_copy_user_highpage(struct page *to, struct page *from,
  */
 void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\n\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -72,7 +72,7 @@ void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v3_user_fns __initdata = {

View file

@@ -71,7 +71,7 @@ mc_copy_user_page(void *from, void *to)
 void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
-	void *kto = kmap_atomic(to, KM_USER1);
+	void *kto = kmap_atomic(to);
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
@@ -85,7 +85,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 
 	raw_spin_unlock(&minicache_lock);
 
-	kunmap_atomic(kto, KM_USER1);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -93,7 +93,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -111,7 +111,7 @@ void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v4_mc_user_fns __initdata = {

View file

@@ -52,12 +52,12 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	v4wb_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -67,7 +67,7 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -86,7 +86,7 @@ void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v4wb_user_fns __initdata = {

View file

@@ -48,11 +48,11 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	v4wt_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -62,7 +62,7 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -79,7 +79,7 @@ void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v4wt_user_fns __initdata = {

View file

@@ -38,11 +38,11 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
 {
 	void *kto, *kfrom;
 
-	kfrom = kmap_atomic(from, KM_USER0);
-	kto = kmap_atomic(to, KM_USER1);
+	kfrom = kmap_atomic(from);
+	kto = kmap_atomic(to);
 	copy_page(kto, kfrom);
-	kunmap_atomic(kto, KM_USER1);
-	kunmap_atomic(kfrom, KM_USER0);
+	kunmap_atomic(kto);
+	kunmap_atomic(kfrom);
 }
 
 /*
@@ -51,9 +51,9 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
  */
 static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
 {
-	void *kaddr = kmap_atomic(page, KM_USER0);
+	void *kaddr = kmap_atomic(page);
 	clear_page(kaddr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 /*

View file

@@ -75,12 +75,12 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	xsc3_mc_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -90,7 +90,7 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
  */
 void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile ("\
 	mov	r1, %2				\n\
 	mov	r2, #0				\n\
@@ -105,7 +105,7 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns xsc3_mc_user_fns __initdata = {

View file

@@ -93,7 +93,7 @@ mc_copy_user_page(void *from, void *to)
 void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
-	void *kto = kmap_atomic(to, KM_USER1);
+	void *kto = kmap_atomic(to);
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
@@ -107,7 +107,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 
 	raw_spin_unlock(&minicache_lock);
 
-	kunmap_atomic(kto, KM_USER1);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -116,7 +116,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 void
 xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile(
 	"mov	r1, %2				\n\
 	mov	r2, #0				\n\
@@ -133,7 +133,7 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3", "ip");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns xscale_mc_user_fns __initdata = {

View file

@@ -36,7 +36,7 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
 	unsigned int idx;
 	unsigned long vaddr;
@@ -81,7 +81,7 @@ void *__kmap_atomic(struct page *page)
 
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {

View file

@@ -157,7 +157,7 @@ static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
 	pagefault_enable();
 }
 
-void *__kmap_atomic(struct page *page);
+void *kmap_atomic(struct page *page);
 void __kunmap_atomic(void *kvaddr);
 
 #endif /* !__ASSEMBLY__ */

View file

@@ -37,7 +37,7 @@ struct page *kmap_atomic_to_page(void *ptr)
 	return virt_to_page(ptr);
 }
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
 	unsigned long paddr;
 	int type;
@@ -64,7 +64,7 @@ void *__kmap_atomic(struct page *page)
 		return NULL;
 	}
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {

View file

@@ -47,7 +47,7 @@ extern void kunmap_high(struct page *page);
 
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page);
+extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(void *ptr);

View file

@@ -498,7 +498,7 @@ static inline void local_r4k_flush_cache_page(void *args)
 		if (map_coherent)
 			vaddr = kmap_coherent(page, addr);
 		else
-			vaddr = kmap_atomic(page, KM_USER0);
+			vaddr = kmap_atomic(page);
 		addr = (unsigned long)vaddr;
 	}
 
@@ -521,7 +521,7 @@ static inline void local_r4k_flush_cache_page(void *args)
 		if (map_coherent)
 			kunmap_coherent();
 		else
-			kunmap_atomic(vaddr, KM_USER0);
+			kunmap_atomic(vaddr);
 	}
 }
 

View file

@@ -41,7 +41,7 @@ EXPORT_SYMBOL(kunmap);
  * kmaps are appropriate for short, tight code paths only.
  */
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
 	unsigned long vaddr;
 	int idx, type;
@@ -62,7 +62,7 @@ void *__kmap_atomic(struct page *page)
 
 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {

View file

@@ -207,21 +207,21 @@ void copy_user_highpage(struct page *to, struct page *from,
 {
 	void *vfrom, *vto;
 
-	vto = kmap_atomic(to, KM_USER1);
+	vto = kmap_atomic(to);
 	if (cpu_has_dc_aliases &&
 	    page_mapped(from) && !Page_dcache_dirty(from)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
 		kunmap_coherent();
 	} else {
-		vfrom = kmap_atomic(from, KM_USER0);
+		vfrom = kmap_atomic(from);
 		copy_page(vto, vfrom);
-		kunmap_atomic(vfrom, KM_USER0);
+		kunmap_atomic(vfrom);
 	}
 	if ((!cpu_has_ic_fills_f_dc) ||
 	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
 		flush_data_cache_page((unsigned long)vto);
-	kunmap_atomic(vto, KM_USER1);
+	kunmap_atomic(vto);
 	/* Make sure this page is cleared on other CPU's too before using it */
 	smp_wmb();
 }

View file

@@ -70,7 +70,7 @@ static inline void kunmap(struct page *page)
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline unsigned long __kmap_atomic(struct page *page)
+static inline unsigned long kmap_atomic(struct page *page)
 {
 	unsigned long vaddr;
 	int idx, type;

View file

@@ -140,7 +140,7 @@ static inline void *kmap(struct page *page)
 
 #define kunmap(page)			kunmap_parisc(page_address(page))
 
-static inline void *__kmap_atomic(struct page *page)
+static inline void *kmap_atomic(struct page *page)
 {
 	pagefault_disable();
 	return page_address(page);

View file

@@ -79,7 +79,7 @@ static inline void kunmap(struct page *page)
 	kunmap_high(page);
 }
 
-static inline void *__kmap_atomic(struct page *page)
+static inline void *kmap_atomic(struct page *page)
 {
 	return kmap_atomic_prot(page, kmap_prot);
 }

View file

@@ -227,14 +227,14 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
 	hpage_offset /= 4;
 
 	get_page(hpage);
-	page = kmap_atomic(hpage, KM_USER0);
+	page = kmap_atomic(hpage);
 
 	/* patch dcbz into reserved instruction, so we trap */
 	for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
 		if ((page[i] & 0xff0007ff) == INS_DCBZ)
 			page[i] &= 0xfffffff7;
 
-	kunmap_atomic(page, KM_USER0);
+	kunmap_atomic(page);
 	put_page(hpage);
 }
 

View file

@@ -365,12 +365,11 @@ static inline void __dma_sync_page_highmem(struct page *page,
 	local_irq_save(flags);
 
 	do {
-		start = (unsigned long)kmap_atomic(page + seg_nr,
-				KM_PPC_SYNC_PAGE) + seg_offset;
+		start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;
 
 		/* Sync this buffer segment */
 		__dma_sync((void *)start, seg_size, direction);
-		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
+		kunmap_atomic((void *)start);
 		seg_nr++;
 
 		/* Calculate next buffer segment size */
View file

@@ -910,9 +910,9 @@ void flush_dcache_icache_hugepage(struct page *page)
 		if (!PageHighMem(page)) {
 			__flush_dcache_icache(page_address(page+i));
 		} else {
-			start = kmap_atomic(page+i, KM_PPC_SYNC_ICACHE);
+			start = kmap_atomic(page+i);
 			__flush_dcache_icache(start);
-			kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+			kunmap_atomic(start);
 		}
 	}
 }

View file

@@ -458,9 +458,9 @@ void flush_dcache_icache_page(struct page *page)
 #endif
 #ifdef CONFIG_BOOKE
 	{
-		void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
+		void *start = kmap_atomic(page);
 		__flush_dcache_icache(start);
-		kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+		kunmap_atomic(start);
 	}
 #elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
 	/* On 8xx there is no need to kmap since highmem is not supported */

View file

@@ -244,7 +244,7 @@ static void sh4_flush_cache_page(void *args)
 		if (map_coherent)
 			vaddr = kmap_coherent(page, address);
 		else
-			vaddr = kmap_atomic(page, KM_USER0);
+			vaddr = kmap_atomic(page);
 		address = (unsigned long)vaddr;
 	}
 
@@ -259,7 +259,7 @@ static void sh4_flush_cache_page(void *args)
 		if (map_coherent)
 			kunmap_coherent(vaddr);
 		else
-			kunmap_atomic(vaddr, KM_USER0);
+			kunmap_atomic(vaddr);
 	}
 }
 

View file

@@ -95,7 +95,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 {
 	void *vfrom, *vto;
 
-	vto = kmap_atomic(to, KM_USER1);
+	vto = kmap_atomic(to);
 
 	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
 	    test_bit(PG_dcache_clean, &from->flags)) {
@@ -103,16 +103,16 @@ void copy_user_highpage(struct page *to, struct page *from,
 		copy_page(vto, vfrom);
 		kunmap_coherent(vfrom);
 	} else {
-		vfrom = kmap_atomic(from, KM_USER0);
+		vfrom = kmap_atomic(from);
 		copy_page(vto, vfrom);
-		kunmap_atomic(vfrom, KM_USER0);
+		kunmap_atomic(vfrom);
 	}
 
 	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
 	    (vma->vm_flags & VM_EXEC))
 		__flush_purge_region(vto, PAGE_SIZE);
 
-	kunmap_atomic(vto, KM_USER1);
+	kunmap_atomic(vto);
 	/* Make sure this page is cleared on other CPU's too before using it */
 	smp_wmb();
 }
@@ -120,14 +120,14 @@ EXPORT_SYMBOL(copy_user_highpage);
 
 void clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *kaddr = kmap_atomic(page, KM_USER0);
+	void *kaddr = kmap_atomic(page);
 
 	clear_page(kaddr);
 
 	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
 		__flush_purge_region(kaddr, PAGE_SIZE);
 
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 EXPORT_SYMBOL(clear_user_highpage);

View file

@@ -70,7 +70,7 @@ static inline void kunmap(struct page *page)
 	kunmap_high(page);
 }
 
-extern void *__kmap_atomic(struct page *page);
+extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 extern struct page *kmap_atomic_to_page(void *vaddr);
 

View file

@@ -30,7 +30,7 @@
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
 	unsigned long vaddr;
 	long idx, type;
@@ -64,7 +64,7 @@ void *__kmap_atomic(struct page *page)
 
 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {

View file

@@ -59,7 +59,7 @@ void *kmap_fix_kpte(struct page *page, int finished);
 /* This macro is used only in map_new_virtual() to map "page". */
 #define kmap_prot page_to_kpgprot(page)
 
-void *__kmap_atomic(struct page *page);
+void *kmap_atomic(struct page *page);
 void __kunmap_atomic(void *kvaddr);
 void *kmap_atomic_pfn(unsigned long pfn);
 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);

View file

@@ -224,12 +224,12 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
 	/* PAGE_NONE is a magic value that tells us to check immutability. */
 	return kmap_atomic_prot(page, PAGE_NONE);
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {

View file

@@ -69,7 +69,7 @@ static int do_op_one_page(unsigned long addr, int len, int is_write,
 		return -1;
 
 	page = pte_page(*pte);
-	addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) +
+	addr = (unsigned long) kmap_atomic(page) +
 		(addr & ~PAGE_MASK);
 
 	current->thread.fault_catcher = &buf;
@@ -82,7 +82,7 @@ static int do_op_one_page(unsigned long addr, int len, int is_write,
 
 	current->thread.fault_catcher = NULL;
 
-	kunmap_atomic((void *)addr, KM_UML_USERCOPY);
+	kunmap_atomic((void *)addr);
 
 	return n;
 }

View file

@@ -1108,12 +1108,12 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
 		one_entry_in_sg = 1;
 		scatterwalk_start(&src_sg_walk, req->src);
 		scatterwalk_start(&assoc_sg_walk, req->assoc);
-		src = scatterwalk_map(&src_sg_walk, 0);
-		assoc = scatterwalk_map(&assoc_sg_walk, 0);
+		src = scatterwalk_map(&src_sg_walk);
+		assoc = scatterwalk_map(&assoc_sg_walk);
 		dst = src;
 		if (unlikely(req->src != req->dst)) {
 			scatterwalk_start(&dst_sg_walk, req->dst);
-			dst = scatterwalk_map(&dst_sg_walk, 0);
+			dst = scatterwalk_map(&dst_sg_walk);
 		}
 
 	} else {
@@ -1137,11 +1137,11 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
 	 * back to the packet. */
 	if (one_entry_in_sg) {
 		if (unlikely(req->src != req->dst)) {
-			scatterwalk_unmap(dst, 0);
+			scatterwalk_unmap(dst);
 			scatterwalk_done(&dst_sg_walk, 0, 0);
 		}
-		scatterwalk_unmap(src, 0);
-		scatterwalk_unmap(assoc, 0);
+		scatterwalk_unmap(src);
+		scatterwalk_unmap(assoc);
 		scatterwalk_done(&src_sg_walk, 0, 0);
 		scatterwalk_done(&assoc_sg_walk, 0, 0);
 	} else {
@@ -1190,12 +1190,12 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
 		one_entry_in_sg = 1;
 		scatterwalk_start(&src_sg_walk, req->src);
 		scatterwalk_start(&assoc_sg_walk, req->assoc);
-		src = scatterwalk_map(&src_sg_walk, 0);
-		assoc = scatterwalk_map(&assoc_sg_walk, 0);
+		src = scatterwalk_map(&src_sg_walk);
+		assoc = scatterwalk_map(&assoc_sg_walk);
 		dst = src;
 		if (unlikely(req->src != req->dst)) {
 			scatterwalk_start(&dst_sg_walk, req->dst);
-			dst = scatterwalk_map(&dst_sg_walk, 0);
+			dst = scatterwalk_map(&dst_sg_walk);
 		}
 
 	} else {
@@ -1220,11 +1220,11 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
 
 	if (one_entry_in_sg) {
 		if (unlikely(req->src != req->dst)) {
-			scatterwalk_unmap(dst, 0);
+			scatterwalk_unmap(dst);
 			scatterwalk_done(&dst_sg_walk, 0, 0);
 		}
-		scatterwalk_unmap(src, 0);
-		scatterwalk_unmap(assoc, 0);
+		scatterwalk_unmap(src);
+		scatterwalk_unmap(assoc);
 		scatterwalk_done(&src_sg_walk, 0, 0);
 		scatterwalk_done(&assoc_sg_walk, 0, 0);
 	} else {

View file

@@ -61,7 +61,7 @@ void *kmap(struct page *page);
 void kunmap(struct page *page);
 
 void *kmap_atomic_prot(struct page *page, pgprot_t prot);
-void *__kmap_atomic(struct page *page);
+void *kmap_atomic(struct page *page);
 void __kunmap_atomic(void *kvaddr);
 void *kmap_atomic_pfn(unsigned long pfn);
 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);

View file

@@ -62,16 +62,16 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 
 	if (!userbuf) {
 		memcpy(buf, (vaddr + offset), csize);
-		kunmap_atomic(vaddr, KM_PTE0);
+		kunmap_atomic(vaddr);
 	} else {
 		if (!kdump_buf_page) {
 			printk(KERN_WARNING "Kdump: Kdump buffer page not"
 				" allocated\n");
-			kunmap_atomic(vaddr, KM_PTE0);
+			kunmap_atomic(vaddr);
 			return -EFAULT;
 		}
 		copy_page(kdump_buf_page, vaddr);
-		kunmap_atomic(vaddr, KM_PTE0);
+		kunmap_atomic(vaddr);
 		if (copy_to_user(buf, (kdump_buf_page + offset), csize))
 			return -EFAULT;
 	}

View file

@@ -1283,9 +1283,9 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
 	if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
 		return;
 
-	vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
+	vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
 	data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
-	kunmap_atomic(vapic, KM_USER0);
+	kunmap_atomic(vapic);
 
 	apic_set_tpr(vcpu->arch.apic, data & 0xff);
 }
@@ -1310,9 +1310,9 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
 	max_isr = 0;
 	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
 
-	vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
+	vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
 	*(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
-	kunmap_atomic(vapic, KM_USER0);
+	kunmap_atomic(vapic);
 }
 
 void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)

View file

@@ -92,9 +92,9 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	if (unlikely(npages != 1))
 		return -EFAULT;
 
-	table = kmap_atomic(page, KM_USER0);
+	table = kmap_atomic(page);
 	ret = CMPXCHG(&table[index], orig_pte, new_pte);
-	kunmap_atomic(table, KM_USER0);
+	kunmap_atomic(table);
 
 	kvm_release_page_dirty(page);
 

View file

@@ -1162,12 +1162,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	 */
 	vcpu->hv_clock.version += 2;
 
-	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
+	shared_kaddr = kmap_atomic(vcpu->time_page);
 
 	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
 	       sizeof(vcpu->hv_clock));
 
-	kunmap_atomic(shared_kaddr, KM_USER0);
+	kunmap_atomic(shared_kaddr);
 
 	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
 	return 0;
@@ -3848,7 +3848,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 		goto emul_write;
 	}
 
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	kaddr += offset_in_page(gpa);
 	switch (bytes) {
 	case 1:
@@ -3866,7 +3866,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 	default:
 		BUG();
 	}
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	kvm_release_page_dirty(page);
 
 	if (!exchanged)
View file

@@ -760,9 +760,9 @@ survive:
 			break;
 		}
 
-		maddr = kmap_atomic(pg, KM_USER0);
+		maddr = kmap_atomic(pg);
 		memcpy(maddr + offset, from, len);
-		kunmap_atomic(maddr, KM_USER0);
+		kunmap_atomic(maddr);
 		set_page_dirty_lock(pg);
 		put_page(pg);
 		up_read(&current->mm->mmap_sem);

View file

@@ -51,11 +51,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
 	return kmap_atomic_prot(page, kmap_prot);
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't

View file

@@ -46,7 +46,7 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
 	unsigned int nbytes = min(walk->entrylen,
 				  ((unsigned int)(PAGE_SIZE)) - offset);
 
-	walk->data = crypto_kmap(walk->pg, 0);
+	walk->data = kmap_atomic(walk->pg);
 	walk->data += offset;
 
 	if (offset & alignmask) {
@@ -93,7 +93,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 		return nbytes;
 	}
 
-	crypto_kunmap(walk->data, 0);
+	kunmap_atomic(walk->data);
 	crypto_yield(walk->flags);
 
 	if (err)

View file

@@ -79,13 +79,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 		/* wait for any prerequisite operations */
 		async_tx_quiesce(&submit->depend_tx);
 
-		dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
-		src_buf = kmap_atomic(src, KM_USER1) + src_offset;
+		dest_buf = kmap_atomic(dest) + dest_offset;
+		src_buf = kmap_atomic(src) + src_offset;
 
 		memcpy(dest_buf, src_buf, len);
 
-		kunmap_atomic(src_buf, KM_USER1);
-		kunmap_atomic(dest_buf, KM_USER0);
+		kunmap_atomic(src_buf);
+		kunmap_atomic(dest_buf);
 
 		async_tx_sync_epilog(submit);
 	}

View file

@@ -43,22 +43,22 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
 
 static inline void blkcipher_map_src(struct blkcipher_walk *walk)
 {
-	walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
+	walk->src.virt.addr = scatterwalk_map(&walk->in);
 }
 
 static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
 {
-	walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
+	walk->dst.virt.addr = scatterwalk_map(&walk->out);
 }
 
 static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
 {
-	scatterwalk_unmap(walk->src.virt.addr, 0);
+	scatterwalk_unmap(walk->src.virt.addr);
 }
 
 static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
 {
-	scatterwalk_unmap(walk->dst.virt.addr, 1);
+	scatterwalk_unmap(walk->dst.virt.addr);
 }
 
 /* Get a spot of the specified length that does not straddle a page.

View file

@@ -216,12 +216,12 @@ static void get_data_to_compute(struct crypto_cipher *tfm,
 			scatterwalk_start(&walk, sg_next(walk.sg));
 			n = scatterwalk_clamp(&walk, len);
 		}
-		data_src = scatterwalk_map(&walk, 0);
+		data_src = scatterwalk_map(&walk);
 
 		compute_mac(tfm, data_src, n, pctx);
 		len -= n;
 
-		scatterwalk_unmap(data_src, 0);
+		scatterwalk_unmap(data_src);
 		scatterwalk_advance(&walk, n);
 		scatterwalk_done(&walk, 0, len);
 		if (len)

View file

@@ -40,9 +40,9 @@ void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
 }
 EXPORT_SYMBOL_GPL(scatterwalk_start);
 
-void *scatterwalk_map(struct scatter_walk *walk, int out)
+void *scatterwalk_map(struct scatter_walk *walk)
 {
-	return crypto_kmap(scatterwalk_page(walk), out) +
+	return kmap_atomic(scatterwalk_page(walk)) +
 	       offset_in_page(walk->offset);
 }
 EXPORT_SYMBOL_GPL(scatterwalk_map);
@@ -83,9 +83,9 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
 		if (len_this_page > nbytes)
 			len_this_page = nbytes;
 
-		vaddr = scatterwalk_map(walk, out);
+		vaddr = scatterwalk_map(walk);
 		memcpy_dir(buf, vaddr, len_this_page, out);
-		scatterwalk_unmap(vaddr, out);
+		scatterwalk_unmap(vaddr);
 
 		scatterwalk_advance(walk, len_this_page);
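A reading of the scatterwalk hunks above (my gloss, not stated in the changelog): the in/out flag of scatterwalk_map()/scatterwalk_unmap() existed only to select one of two fixed kmap slots, so it is dropped along with the slots, while the direction still matters to the copy and to scatterwalk_done(). The resulting caller pattern, as in scatterwalk_copychunks():

	vaddr = scatterwalk_map(walk);			/* was: scatterwalk_map(walk, out) */
	memcpy_dir(buf, vaddr, len_this_page, out);	/* direction still drives the copy */
	scatterwalk_unmap(vaddr);			/* was: scatterwalk_unmap(vaddr, out) */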

View file

@@ -281,10 +281,10 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
 
 	if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
 		void *data;
 
-		data = crypto_kmap(sg_page(sg), 0);
+		data = kmap_atomic(sg_page(sg));
 		err = crypto_shash_digest(desc, data + offset, nbytes,
 					  req->result);
-		crypto_kunmap(data, 0);
+		kunmap_atomic(data);
 		crypto_yield(desc->flags);
 	} else
@@ -420,9 +420,9 @@ static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
 		desc->flags = hdesc->flags;
 
-		data = crypto_kmap(sg_page(sg), 0);
+		data = kmap_atomic(sg_page(sg));
 		err = crypto_shash_digest(desc, data + offset, nbytes, out);
-		crypto_kunmap(data, 0);
+		kunmap_atomic(data);
 		crypto_yield(desc->flags);
 
 		goto out;
 	}

View file

@@ -720,13 +720,13 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 		/* FIXME: use a bounce buffer */
 		local_irq_save(flags);
 
-		buf = kmap_atomic(page, KM_IRQ0);
+		buf = kmap_atomic(page);
 
 		/* do the actual data transfer */
 		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
 				       do_write);
 
-		kunmap_atomic(buf, KM_IRQ0);
+		kunmap_atomic(buf);
 		local_irq_restore(flags);
 	} else {
 		buf = page_address(page);
@@ -865,13 +865,13 @@ next_sg:
 		/* FIXME: use bounce buffer */
 		local_irq_save(flags);
 
-		buf = kmap_atomic(page, KM_IRQ0);
+		buf = kmap_atomic(page);
 
 		/* do the actual data transfer */
 		consumed = ap->ops->sff_data_xfer(dev, buf + offset,
 						  count, rw);
 
-		kunmap_atomic(buf, KM_IRQ0);
+		kunmap_atomic(buf);
 		local_irq_restore(flags);
 	} else {
 		buf = page_address(page);

View file

@@ -242,9 +242,9 @@ static void copy_to_brd(struct brd_device *brd, const void *src,
 	page = brd_lookup_page(brd, sector);
 	BUG_ON(!page);
 
-	dst = kmap_atomic(page, KM_USER1);
+	dst = kmap_atomic(page);
 	memcpy(dst + offset, src, copy);
-	kunmap_atomic(dst, KM_USER1);
+	kunmap_atomic(dst);
 
 	if (copy < n) {
 		src += copy;
@@ -253,9 +253,9 @@ static void copy_to_brd(struct brd_device *brd, const void *src,
 		page = brd_lookup_page(brd, sector);
 		BUG_ON(!page);
 
-		dst = kmap_atomic(page, KM_USER1);
+		dst = kmap_atomic(page);
 		memcpy(dst, src, copy);
-		kunmap_atomic(dst, KM_USER1);
+		kunmap_atomic(dst);
 	}
 }
@@ -273,9 +273,9 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
 	copy = min_t(size_t, n, PAGE_SIZE - offset);
 	page = brd_lookup_page(brd, sector);
 	if (page) {
-		src = kmap_atomic(page, KM_USER1);
+		src = kmap_atomic(page);
 		memcpy(dst, src + offset, copy);
-		kunmap_atomic(src, KM_USER1);
+		kunmap_atomic(src);
 	} else
 		memset(dst, 0, copy);
@@ -285,9 +285,9 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
 		copy = n - copy;
 		page = brd_lookup_page(brd, sector);
 		if (page) {
-			src = kmap_atomic(page, KM_USER1);
+			src = kmap_atomic(page);
 			memcpy(dst, src, copy);
-			kunmap_atomic(src, KM_USER1);
+			kunmap_atomic(src);
 		} else
 			memset(dst, 0, copy);
 	}
@@ -309,7 +309,7 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
 		goto out;
 	}
 
-	mem = kmap_atomic(page, KM_USER0);
+	mem = kmap_atomic(page);
 	if (rw == READ) {
 		copy_from_brd(mem + off, brd, sector, len);
 		flush_dcache_page(page);
@@ -317,7 +317,7 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
 		flush_dcache_page(page);
 		copy_to_brd(brd, mem + off, sector, len);
 	}
-	kunmap_atomic(mem, KM_USER0);
+	kunmap_atomic(mem);
 
 out:
 	return err;

View file

@@ -289,25 +289,25 @@ static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
 	return page_nr;
 }
 
-static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km)
+static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
 {
 	struct page *page = b->bm_pages[idx];
-	return (unsigned long *) kmap_atomic(page, km);
+	return (unsigned long *) kmap_atomic(page);
 }
 
 static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
 {
-	return __bm_map_pidx(b, idx, KM_IRQ1);
+	return __bm_map_pidx(b, idx);
 }
 
-static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
+static void __bm_unmap(unsigned long *p_addr)
 {
-	kunmap_atomic(p_addr, km);
+	kunmap_atomic(p_addr);
 };
 
 static void bm_unmap(unsigned long *p_addr)
 {
-	return __bm_unmap(p_addr, KM_IRQ1);
+	return __bm_unmap(p_addr);
 }
 
 /* long word offset of _bitmap_ sector */
@@ -543,15 +543,15 @@ static unsigned long bm_count_bits(struct drbd_bitmap *b)
 
 	/* all but last page */
 	for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
-		p_addr = __bm_map_pidx(b, idx, KM_USER0);
+		p_addr = __bm_map_pidx(b, idx);
 		for (i = 0; i < LWPP; i++)
 			bits += hweight_long(p_addr[i]);
-		__bm_unmap(p_addr, KM_USER0);
+		__bm_unmap(p_addr);
 		cond_resched();
 	}
 	/* last (or only) page */
 	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
-	p_addr = __bm_map_pidx(b, idx, KM_USER0);
+	p_addr = __bm_map_pidx(b, idx);
 	for (i = 0; i < last_word; i++)
 		bits += hweight_long(p_addr[i]);
 	p_addr[last_word] &= cpu_to_lel(mask);
@@ -559,7 +559,7 @@ static unsigned long bm_count_bits(struct drbd_bitmap *b)
 	/* 32bit arch, may have an unused padding long */
 	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
 		p_addr[last_word+1] = 0;
-	__bm_unmap(p_addr, KM_USER0);
+	__bm_unmap(p_addr);
 
 	return bits;
 }
@@ -970,11 +970,11 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
 		 * to use pre-allocated page pool */
 		void *src, *dest;
 		page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
-		dest = kmap_atomic(page, KM_USER0);
-		src = kmap_atomic(b->bm_pages[page_nr], KM_USER1);
+		dest = kmap_atomic(page);
+		src = kmap_atomic(b->bm_pages[page_nr]);
 		memcpy(dest, src, PAGE_SIZE);
-		kunmap_atomic(src, KM_USER1);
-		kunmap_atomic(dest, KM_USER0);
+		kunmap_atomic(src);
+		kunmap_atomic(dest);
 		bm_store_page_idx(page, page_nr);
 	} else
 		page = b->bm_pages[page_nr];
@@ -1163,7 +1163,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
  * this returns a bit number, NOT a sector!
  */
 static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
-	const int find_zero_bit, const enum km_type km)
+	const int find_zero_bit)
 {
 	struct drbd_bitmap *b = mdev->bitmap;
 	unsigned long *p_addr;
@@ -1178,7 +1178,7 @@ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
 	while (bm_fo < b->bm_bits) {
 		/* bit offset of the first bit in the page */
 		bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
-		p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km);
+		p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo));
 
 		if (find_zero_bit)
 			i = find_next_zero_bit_le(p_addr,
@@ -1187,7 +1187,7 @@ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
 			i = find_next_bit_le(p_addr,
 					PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
 
-		__bm_unmap(p_addr, km);
+		__bm_unmap(p_addr);
 
 		if (i < PAGE_SIZE*8) {
 			bm_fo = bit_offset + i;
 			if (bm_fo >= b->bm_bits)
@@ -1215,7 +1215,7 @@ static unsigned long bm_find_next(struct drbd_conf *mdev,
 	if (BM_DONT_TEST & b->bm_flags)
 		bm_print_lock_info(mdev);
 
-	i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);
+	i = __bm_find_next(mdev, bm_fo, find_zero_bit);
 
 	spin_unlock_irq(&b->bm_lock);
 	return i;
@@ -1239,13 +1239,13 @@ unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo
 
 unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
 {
 	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
-	return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
+	return __bm_find_next(mdev, bm_fo, 0);
 }
 
 unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
 {
 	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
-	return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
+	return __bm_find_next(mdev, bm_fo, 1);
 }
 
 /* returns number of bits actually changed.
@@ -1273,14 +1273,14 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
 		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
 		if (page_nr != last_page_nr) {
 			if (p_addr)
-				__bm_unmap(p_addr, KM_IRQ1);
+				__bm_unmap(p_addr);
 			if (c < 0)
 				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
 			else if (c > 0)
 				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
 			changed_total += c;
 			c = 0;
-			p_addr = __bm_map_pidx(b, page_nr, KM_IRQ1);
+			p_addr = __bm_map_pidx(b, page_nr);
 			last_page_nr = page_nr;
 		}
 		if (val)
@@ -1289,7 +1289,7 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
 			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
 	}
 	if (p_addr)
-		__bm_unmap(p_addr, KM_IRQ1);
+		__bm_unmap(p_addr);
 	if (c < 0)
 		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
 	else if (c > 0)
@@ -1342,13 +1342,13 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
 {
 	int i;
 	int bits;
-	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_IRQ1);
+	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);
 	for (i = first_word; i < last_word; i++) {
 		bits = hweight_long(paddr[i]);
 		paddr[i] = ~0UL;
 		b->bm_set += BITS_PER_LONG - bits;
 	}
-	kunmap_atomic(paddr, KM_IRQ1);
+	kunmap_atomic(paddr);
 }
 
 /* Same thing as drbd_bm_set_bits,

View file

@@ -2526,10 +2526,10 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
 
 	page = e->pages;
 	page_chain_for_each(page) {
-		void *d = kmap_atomic(page, KM_USER0);
+		void *d = kmap_atomic(page);
 		unsigned l = min_t(unsigned, len, PAGE_SIZE);
 		memcpy(tl, d, l);
-		kunmap_atomic(d, KM_USER0);
+		kunmap_atomic(d);
 		tl = (unsigned short*)((char*)tl + l);
 		len -= l;
 		if (len == 0)

View file

@@ -93,16 +93,16 @@ static int transfer_none(struct loop_device *lo, int cmd,
 			 struct page *loop_page, unsigned loop_off,
 			 int size, sector_t real_block)
 {
-	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
-	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
+	char *raw_buf = kmap_atomic(raw_page) + raw_off;
+	char *loop_buf = kmap_atomic(loop_page) + loop_off;
 
 	if (cmd == READ)
 		memcpy(loop_buf, raw_buf, size);
 	else
 		memcpy(raw_buf, loop_buf, size);
 
-	kunmap_atomic(loop_buf, KM_USER1);
-	kunmap_atomic(raw_buf, KM_USER0);
+	kunmap_atomic(loop_buf);
+	kunmap_atomic(raw_buf);
 	cond_resched();
 	return 0;
 }
@@ -112,8 +112,8 @@ static int transfer_xor(struct loop_device *lo, int cmd,
 			struct page *loop_page, unsigned loop_off,
 			int size, sector_t real_block)
 {
-	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
-	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
+	char *raw_buf = kmap_atomic(raw_page) + raw_off;
+	char *loop_buf = kmap_atomic(loop_page) + loop_off;
 	char *in, *out, *key;
 	int i, keysize;
 
@@ -130,8 +130,8 @@ static int transfer_xor(struct loop_device *lo, int cmd,
 	for (i = 0; i < size; i++)
 		*out++ = *in++ ^ key[(i & 511) % keysize];
 
-	kunmap_atomic(loop_buf, KM_USER1);
-	kunmap_atomic(raw_buf, KM_USER0);
+	kunmap_atomic(loop_buf);
+	kunmap_atomic(raw_buf);
 	cond_resched();
 	return 0;
 }

View file

@@ -987,14 +987,14 @@ static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct pag
 
 	while (copy_size > 0) {
 		struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
-		void *vfrom = kmap_atomic(src_bvl->bv_page, KM_USER0) +
+		void *vfrom = kmap_atomic(src_bvl->bv_page) +
 			src_bvl->bv_offset + offs;
 		void *vto = page_address(dst_page) + dst_offs;
 		int len = min_t(int, copy_size, src_bvl->bv_len - offs);
 
 		BUG_ON(len < 0);
 		memcpy(vto, vfrom, len);
-		kunmap_atomic(vfrom, KM_USER0);
+		kunmap_atomic(vfrom);
 
 		seg++;
 		offs = 0;
@@ -1019,10 +1019,10 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
 	offs = 0;
 	for (f = 0; f < pkt->frames; f++) {
 		if (bvec[f].bv_page != pkt->pages[p]) {
-			void *vfrom = kmap_atomic(bvec[f].bv_page, KM_USER0) + bvec[f].bv_offset;
+			void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
 			void *vto = page_address(pkt->pages[p]) + offs;
 			memcpy(vto, vfrom, CD_FRAMESIZE);
-			kunmap_atomic(vfrom, KM_USER0);
+			kunmap_atomic(vfrom);
 			bvec[f].bv_page = pkt->pages[p];
 			bvec[f].bv_offset = offs;
 		} else {

View file

@ -1731,9 +1731,9 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset
while (size) { while (size) {
copy = min3(srest, dst->length, size); copy = min3(srest, dst->length, size);
daddr = kmap_atomic(sg_page(dst), KM_IRQ0); daddr = kmap_atomic(sg_page(dst));
memcpy(daddr + dst->offset + offset, saddr, copy); memcpy(daddr + dst->offset + offset, saddr, copy);
kunmap_atomic(daddr, KM_IRQ0); kunmap_atomic(daddr);
nbytes -= copy; nbytes -= copy;
size -= copy; size -= copy;
@ -1793,17 +1793,17 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
continue; continue;
} }
saddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0); saddr = kmap_atomic(sg_page(t));
err = ablkcipher_get(saddr, &t->length, t->offset, err = ablkcipher_get(saddr, &t->length, t->offset,
dst, nbytes, &nbytes); dst, nbytes, &nbytes);
if (err < 0) { if (err < 0) {
kunmap_atomic(saddr, KM_SOFTIRQ0); kunmap_atomic(saddr);
break; break;
} }
idx += err; idx += err;
kunmap_atomic(saddr, KM_SOFTIRQ0); kunmap_atomic(saddr);
} }
hifn_cipher_walk_exit(&rctx->walk); hifn_cipher_walk_exit(&rctx->walk);
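Note what disappears in the hifn hunks: the same driver was using KM_IRQ0 in one path and KM_SOFTIRQ0 in another, because under the fixed-slot scheme the caller had to pick a slot reserved for its execution context. With stacked slots that distinction is gone, so one helper can serve every atomic context. A minimal sketch, with a hypothetical helper name:

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Hypothetical helper: safe from process, softirq and hardirq context. */
static void copy_to_sg_page(struct scatterlist *sg, const void *src, size_t len)
{
	void *dst = kmap_atomic(sg_page(sg));

	memcpy(dst + sg->offset, src, len);
	kunmap_atomic(dst);
}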


@ -620,13 +620,13 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
if (PageHighMem(pg)) if (PageHighMem(pg))
local_irq_save(flags); local_irq_save(flags);
virt_addr = kmap_atomic(pg, KM_BOUNCE_READ); virt_addr = kmap_atomic(pg);
/* Perform architecture specific atomic scrub operation */ /* Perform architecture specific atomic scrub operation */
atomic_scrub(virt_addr + offset, size); atomic_scrub(virt_addr + offset, size);
/* Unmap and complete */ /* Unmap and complete */
kunmap_atomic(virt_addr, KM_BOUNCE_READ); kunmap_atomic(virt_addr);
if (PageHighMem(pg)) if (PageHighMem(pg))
local_irq_restore(flags); local_irq_restore(flags);
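The interrupt disabling around this scrub used to do double duty: with a fixed KM_BOUNCE_READ slot, an interrupt handler mapping a page could silently reuse the same slot, so IRQs had to stay off while the mapping was live. With stacked slots a nested mapping just pushes a new entry, and the local_irq_save() here survives only to keep the scrub sequence itself atomic. A condensed sketch of the remaining pattern (atomic_scrub() is the arch helper the hunk already uses; the unconditional irq-save is a simplification):

#include <linux/highmem.h>
#include <linux/types.h>

static void scrub_block(struct page *pg, unsigned long offset, u32 size)
{
	unsigned long flags;
	void *virt;

	local_irq_save(flags);	/* keep the scrub itself uninterrupted */
	virt = kmap_atomic(pg);	/* safe even if an IRQ also maps a page */
	atomic_scrub(virt + offset, size);
	kunmap_atomic(virt);
	local_irq_restore(flags);
}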


@ -41,10 +41,10 @@ drm_clflush_page(struct page *page)
if (unlikely(page == NULL)) if (unlikely(page == NULL))
return; return;
page_virtual = kmap_atomic(page, KM_USER0); page_virtual = kmap_atomic(page);
for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
clflush(page_virtual + i); clflush(page_virtual + i);
kunmap_atomic(page_virtual, KM_USER0); kunmap_atomic(page_virtual);
} }
static void drm_cache_flush_clflush(struct page *pages[], static void drm_cache_flush_clflush(struct page *pages[],
@ -87,10 +87,10 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
if (unlikely(page == NULL)) if (unlikely(page == NULL))
continue; continue;
page_virtual = kmap_atomic(page, KM_USER0); page_virtual = kmap_atomic(page);
flush_dcache_range((unsigned long)page_virtual, flush_dcache_range((unsigned long)page_virtual,
(unsigned long)page_virtual + PAGE_SIZE); (unsigned long)page_virtual + PAGE_SIZE);
kunmap_atomic(page_virtual, KM_USER0); kunmap_atomic(page_virtual);
} }
#else #else
printk(KERN_ERR "Architecture has no drm_cache.c support\n"); printk(KERN_ERR "Architecture has no drm_cache.c support\n");


@ -125,14 +125,14 @@ static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
int i; int i;
uint8_t *clf; uint8_t *clf;
clf = kmap_atomic(page, KM_USER0); clf = kmap_atomic(page);
mb(); mb();
for (i = 0; i < clflush_count; ++i) { for (i = 0; i < clflush_count; ++i) {
psb_clflush(clf); psb_clflush(clf);
clf += clflush_add; clf += clflush_add;
} }
mb(); mb();
kunmap_atomic(clf, KM_USER0); kunmap_atomic(clf);
} }
static void psb_pages_clflush(struct psb_mmu_driver *driver, static void psb_pages_clflush(struct psb_mmu_driver *driver,
@ -325,7 +325,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
spin_lock(lock); spin_lock(lock);
v = kmap_atomic(pt->p, KM_USER0); v = kmap_atomic(pt->p);
clf = (uint8_t *) v; clf = (uint8_t *) v;
ptes = (uint32_t *) v; ptes = (uint32_t *) v;
for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
@ -341,7 +341,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
mb(); mb();
} }
kunmap_atomic(v, KM_USER0); kunmap_atomic(v);
spin_unlock(lock); spin_unlock(lock);
pt->count = 0; pt->count = 0;
@ -376,18 +376,18 @@ struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
continue; continue;
} }
v = kmap_atomic(pd->p, KM_USER0); v = kmap_atomic(pd->p);
pd->tables[index] = pt; pd->tables[index] = pt;
v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask; v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
pt->index = index; pt->index = index;
kunmap_atomic((void *) v, KM_USER0); kunmap_atomic((void *) v);
if (pd->hw_context != -1) { if (pd->hw_context != -1) {
psb_mmu_clflush(pd->driver, (void *) &v[index]); psb_mmu_clflush(pd->driver, (void *) &v[index]);
atomic_set(&pd->driver->needs_tlbflush, 1); atomic_set(&pd->driver->needs_tlbflush, 1);
} }
} }
pt->v = kmap_atomic(pt->p, KM_USER0); pt->v = kmap_atomic(pt->p);
return pt; return pt;
} }
@ -404,7 +404,7 @@ static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
spin_unlock(lock); spin_unlock(lock);
return NULL; return NULL;
} }
pt->v = kmap_atomic(pt->p, KM_USER0); pt->v = kmap_atomic(pt->p);
return pt; return pt;
} }
@ -413,9 +413,9 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
struct psb_mmu_pd *pd = pt->pd; struct psb_mmu_pd *pd = pt->pd;
uint32_t *v; uint32_t *v;
kunmap_atomic(pt->v, KM_USER0); kunmap_atomic(pt->v);
if (pt->count == 0) { if (pt->count == 0) {
v = kmap_atomic(pd->p, KM_USER0); v = kmap_atomic(pd->p);
v[pt->index] = pd->invalid_pde; v[pt->index] = pd->invalid_pde;
pd->tables[pt->index] = NULL; pd->tables[pt->index] = NULL;
@ -424,7 +424,7 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
(void *) &v[pt->index]); (void *) &v[pt->index]);
atomic_set(&pd->driver->needs_tlbflush, 1); atomic_set(&pd->driver->needs_tlbflush, 1);
} }
kunmap_atomic(pt->v, KM_USER0); kunmap_atomic(pt->v);
spin_unlock(&pd->driver->lock); spin_unlock(&pd->driver->lock);
psb_mmu_free_pt(pt); psb_mmu_free_pt(pt);
return; return;
@ -457,7 +457,7 @@ void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
down_read(&driver->sem); down_read(&driver->sem);
spin_lock(&driver->lock); spin_lock(&driver->lock);
v = kmap_atomic(pd->p, KM_USER0); v = kmap_atomic(pd->p);
v += start; v += start;
while (gtt_pages--) { while (gtt_pages--) {
@ -467,7 +467,7 @@ void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
/*ttm_tt_cache_flush(&pd->p, num_pages);*/ /*ttm_tt_cache_flush(&pd->p, num_pages);*/
psb_pages_clflush(pd->driver, &pd->p, num_pages); psb_pages_clflush(pd->driver, &pd->p, num_pages);
kunmap_atomic(v, KM_USER0); kunmap_atomic(v);
spin_unlock(&driver->lock); spin_unlock(&driver->lock);
if (pd->hw_context != -1) if (pd->hw_context != -1)
@ -830,9 +830,9 @@ int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
uint32_t *v; uint32_t *v;
spin_lock(lock); spin_lock(lock);
v = kmap_atomic(pd->p, KM_USER0); v = kmap_atomic(pd->p);
tmp = v[psb_mmu_pd_index(virtual)]; tmp = v[psb_mmu_pd_index(virtual)];
kunmap_atomic(v, KM_USER0); kunmap_atomic(v);
spin_unlock(lock); spin_unlock(lock);
if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) || if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||


@ -309,11 +309,11 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
goto out_err; goto out_err;
preempt_disable(); preempt_disable();
from_virtual = kmap_atomic(from_page, KM_USER0); from_virtual = kmap_atomic(from_page);
to_virtual = kmap_atomic(to_page, KM_USER1); to_virtual = kmap_atomic(to_page);
memcpy(to_virtual, from_virtual, PAGE_SIZE); memcpy(to_virtual, from_virtual, PAGE_SIZE);
kunmap_atomic(to_virtual, KM_USER1); kunmap_atomic(to_virtual);
kunmap_atomic(from_virtual, KM_USER0); kunmap_atomic(from_virtual);
preempt_enable(); preempt_enable();
page_cache_release(from_page); page_cache_release(from_page);
} }
@ -365,11 +365,11 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
goto out_err; goto out_err;
} }
preempt_disable(); preempt_disable();
from_virtual = kmap_atomic(from_page, KM_USER0); from_virtual = kmap_atomic(from_page);
to_virtual = kmap_atomic(to_page, KM_USER1); to_virtual = kmap_atomic(to_page);
memcpy(to_virtual, from_virtual, PAGE_SIZE); memcpy(to_virtual, from_virtual, PAGE_SIZE);
kunmap_atomic(to_virtual, KM_USER1); kunmap_atomic(to_virtual);
kunmap_atomic(from_virtual, KM_USER0); kunmap_atomic(from_virtual);
preempt_enable(); preempt_enable();
set_page_dirty(to_page); set_page_dirty(to_page);
mark_page_accessed(to_page); mark_page_accessed(to_page);
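The TTM swap paths keep two pages mapped at the same time, which is exactly what the old KM_USER0/KM_USER1 pair was for. The only rule the stack model adds is ordering: nested mappings must be released last-in first-out, and CONFIG_DEBUG_HIGHMEM builds should catch violations. A minimal sketch:

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy one (possibly highmem) page to another with both mapped at once. */
static void copy_page_atomic(struct page *dst, struct page *src)
{
	void *from = kmap_atomic(src);
	void *to = kmap_atomic(dst);

	memcpy(to, from, PAGE_SIZE);
	kunmap_atomic(to);	/* unmap in reverse order of mapping */
	kunmap_atomic(from);
}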


@ -136,10 +136,10 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
if (likely(page_virtual != NULL)) { if (likely(page_virtual != NULL)) {
desc_virtual->ppn = page_to_pfn(page); desc_virtual->ppn = page_to_pfn(page);
kunmap_atomic(page_virtual, KM_USER0); kunmap_atomic(page_virtual);
} }
page_virtual = kmap_atomic(page, KM_USER0); page_virtual = kmap_atomic(page);
desc_virtual = page_virtual - 1; desc_virtual = page_virtual - 1;
prev_pfn = ~(0UL); prev_pfn = ~(0UL);
@ -169,7 +169,7 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
} }
if (likely(page_virtual != NULL)) if (likely(page_virtual != NULL))
kunmap_atomic(page_virtual, KM_USER0); kunmap_atomic(page_virtual);
return 0; return 0;
out_err: out_err:


@ -253,7 +253,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
if (page_is_high) if (page_is_high)
local_irq_save(flags); local_irq_save(flags);
buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset; buf = kmap_atomic(page) + offset;
cmd->nleft -= nr_bytes; cmd->nleft -= nr_bytes;
cmd->cursg_ofs += nr_bytes; cmd->cursg_ofs += nr_bytes;
@ -269,7 +269,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
else else
hwif->tp_ops->input_data(drive, cmd, buf, nr_bytes); hwif->tp_ops->input_data(drive, cmd, buf, nr_bytes);
kunmap_atomic(buf, KM_BIO_SRC_IRQ); kunmap_atomic(buf);
if (page_is_high) if (page_is_high)
local_irq_restore(flags); local_irq_restore(flags);


@ -73,11 +73,11 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
p = mem; p = mem;
for_each_sg(sgl, sg, data->size, i) { for_each_sg(sgl, sg, data->size, i) {
from = kmap_atomic(sg_page(sg), KM_USER0); from = kmap_atomic(sg_page(sg));
memcpy(p, memcpy(p,
from + sg->offset, from + sg->offset,
sg->length); sg->length);
kunmap_atomic(from, KM_USER0); kunmap_atomic(from);
p += sg->length; p += sg->length;
} }
} }
@ -133,11 +133,11 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
p = mem; p = mem;
for_each_sg(sgl, sg, sg_size, i) { for_each_sg(sgl, sg, sg_size, i) {
to = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); to = kmap_atomic(sg_page(sg));
memcpy(to + sg->offset, memcpy(to + sg->offset,
p, p,
sg->length); sg->length);
kunmap_atomic(to, KM_SOFTIRQ0); kunmap_atomic(to);
p += sg->length; p += sg->length;
} }
} }


@ -457,7 +457,7 @@ void bitmap_update_sb(struct bitmap *bitmap)
return; return;
} }
spin_unlock_irqrestore(&bitmap->lock, flags); spin_unlock_irqrestore(&bitmap->lock, flags);
sb = kmap_atomic(bitmap->sb_page, KM_USER0); sb = kmap_atomic(bitmap->sb_page);
sb->events = cpu_to_le64(bitmap->mddev->events); sb->events = cpu_to_le64(bitmap->mddev->events);
if (bitmap->mddev->events < bitmap->events_cleared) if (bitmap->mddev->events < bitmap->events_cleared)
/* rocking back to read-only */ /* rocking back to read-only */
@ -467,7 +467,7 @@ void bitmap_update_sb(struct bitmap *bitmap)
/* Just in case these have been changed via sysfs: */ /* Just in case these have been changed via sysfs: */
sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ); sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind); sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
kunmap_atomic(sb, KM_USER0); kunmap_atomic(sb);
write_page(bitmap, bitmap->sb_page, 1); write_page(bitmap, bitmap->sb_page, 1);
} }
@ -478,7 +478,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
if (!bitmap || !bitmap->sb_page) if (!bitmap || !bitmap->sb_page)
return; return;
sb = kmap_atomic(bitmap->sb_page, KM_USER0); sb = kmap_atomic(bitmap->sb_page);
printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap)); printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic)); printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic));
printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version)); printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version));
@ -497,7 +497,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
printk(KERN_DEBUG " sync size: %llu KB\n", printk(KERN_DEBUG " sync size: %llu KB\n",
(unsigned long long)le64_to_cpu(sb->sync_size)/2); (unsigned long long)le64_to_cpu(sb->sync_size)/2);
printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind)); printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
kunmap_atomic(sb, KM_USER0); kunmap_atomic(sb);
} }
/* /*
@ -525,7 +525,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
} }
bitmap->sb_page->index = 0; bitmap->sb_page->index = 0;
sb = kmap_atomic(bitmap->sb_page, KM_USER0); sb = kmap_atomic(bitmap->sb_page);
sb->magic = cpu_to_le32(BITMAP_MAGIC); sb->magic = cpu_to_le32(BITMAP_MAGIC);
sb->version = cpu_to_le32(BITMAP_MAJOR_HI); sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
@ -533,7 +533,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
chunksize = bitmap->mddev->bitmap_info.chunksize; chunksize = bitmap->mddev->bitmap_info.chunksize;
BUG_ON(!chunksize); BUG_ON(!chunksize);
if (!is_power_of_2(chunksize)) { if (!is_power_of_2(chunksize)) {
kunmap_atomic(sb, KM_USER0); kunmap_atomic(sb);
printk(KERN_ERR "bitmap chunksize not a power of 2\n"); printk(KERN_ERR "bitmap chunksize not a power of 2\n");
return -EINVAL; return -EINVAL;
} }
@ -571,7 +571,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
bitmap->flags |= BITMAP_HOSTENDIAN; bitmap->flags |= BITMAP_HOSTENDIAN;
sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN); sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);
kunmap_atomic(sb, KM_USER0); kunmap_atomic(sb);
return 0; return 0;
} }
@ -603,7 +603,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
return err; return err;
} }
sb = kmap_atomic(bitmap->sb_page, KM_USER0); sb = kmap_atomic(bitmap->sb_page);
chunksize = le32_to_cpu(sb->chunksize); chunksize = le32_to_cpu(sb->chunksize);
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
@ -664,7 +664,7 @@ success:
bitmap->events_cleared = bitmap->mddev->events; bitmap->events_cleared = bitmap->mddev->events;
err = 0; err = 0;
out: out:
kunmap_atomic(sb, KM_USER0); kunmap_atomic(sb);
if (err) if (err)
bitmap_print_sb(bitmap); bitmap_print_sb(bitmap);
return err; return err;
@ -689,7 +689,7 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
return 0; return 0;
} }
spin_unlock_irqrestore(&bitmap->lock, flags); spin_unlock_irqrestore(&bitmap->lock, flags);
sb = kmap_atomic(bitmap->sb_page, KM_USER0); sb = kmap_atomic(bitmap->sb_page);
old = le32_to_cpu(sb->state) & bits; old = le32_to_cpu(sb->state) & bits;
switch (op) { switch (op) {
case MASK_SET: case MASK_SET:
@ -703,7 +703,7 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
default: default:
BUG(); BUG();
} }
kunmap_atomic(sb, KM_USER0); kunmap_atomic(sb);
return old; return old;
} }
@ -881,12 +881,12 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
bit = file_page_offset(bitmap, chunk); bit = file_page_offset(bitmap, chunk);
/* set the bit */ /* set the bit */
kaddr = kmap_atomic(page, KM_USER0); kaddr = kmap_atomic(page);
if (bitmap->flags & BITMAP_HOSTENDIAN) if (bitmap->flags & BITMAP_HOSTENDIAN)
set_bit(bit, kaddr); set_bit(bit, kaddr);
else else
__set_bit_le(bit, kaddr); __set_bit_le(bit, kaddr);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
pr_debug("set file bit %lu page %lu\n", bit, page->index); pr_debug("set file bit %lu page %lu\n", bit, page->index);
/* record page number so it gets flushed to disk when unplug occurs */ /* record page number so it gets flushed to disk when unplug occurs */
set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY); set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
@ -1050,10 +1050,10 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
* if bitmap is out of date, dirty the * if bitmap is out of date, dirty the
* whole page and write it out * whole page and write it out
*/ */
paddr = kmap_atomic(page, KM_USER0); paddr = kmap_atomic(page);
memset(paddr + offset, 0xff, memset(paddr + offset, 0xff,
PAGE_SIZE - offset); PAGE_SIZE - offset);
kunmap_atomic(paddr, KM_USER0); kunmap_atomic(paddr);
write_page(bitmap, page, 1); write_page(bitmap, page, 1);
ret = -EIO; ret = -EIO;
@ -1061,12 +1061,12 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
goto err; goto err;
} }
} }
paddr = kmap_atomic(page, KM_USER0); paddr = kmap_atomic(page);
if (bitmap->flags & BITMAP_HOSTENDIAN) if (bitmap->flags & BITMAP_HOSTENDIAN)
b = test_bit(bit, paddr); b = test_bit(bit, paddr);
else else
b = test_bit_le(bit, paddr); b = test_bit_le(bit, paddr);
kunmap_atomic(paddr, KM_USER0); kunmap_atomic(paddr);
if (b) { if (b) {
/* if the disk bit is set, set the memory bit */ /* if the disk bit is set, set the memory bit */
int needed = ((sector_t)(i+1) << (CHUNK_BLOCK_SHIFT(bitmap)) int needed = ((sector_t)(i+1) << (CHUNK_BLOCK_SHIFT(bitmap))
@ -1209,10 +1209,10 @@ void bitmap_daemon_work(struct mddev *mddev)
mddev->bitmap_info.external == 0) { mddev->bitmap_info.external == 0) {
bitmap_super_t *sb; bitmap_super_t *sb;
bitmap->need_sync = 0; bitmap->need_sync = 0;
sb = kmap_atomic(bitmap->sb_page, KM_USER0); sb = kmap_atomic(bitmap->sb_page);
sb->events_cleared = sb->events_cleared =
cpu_to_le64(bitmap->events_cleared); cpu_to_le64(bitmap->events_cleared);
kunmap_atomic(sb, KM_USER0); kunmap_atomic(sb);
write_page(bitmap, bitmap->sb_page, 1); write_page(bitmap, bitmap->sb_page, 1);
} }
spin_lock_irqsave(&bitmap->lock, flags); spin_lock_irqsave(&bitmap->lock, flags);
@ -1235,7 +1235,7 @@ void bitmap_daemon_work(struct mddev *mddev)
-1); -1);
/* clear the bit */ /* clear the bit */
paddr = kmap_atomic(page, KM_USER0); paddr = kmap_atomic(page);
if (bitmap->flags & BITMAP_HOSTENDIAN) if (bitmap->flags & BITMAP_HOSTENDIAN)
clear_bit(file_page_offset(bitmap, j), clear_bit(file_page_offset(bitmap, j),
paddr); paddr);
@ -1244,7 +1244,7 @@ void bitmap_daemon_work(struct mddev *mddev)
file_page_offset(bitmap, file_page_offset(bitmap,
j), j),
paddr); paddr);
kunmap_atomic(paddr, KM_USER0); kunmap_atomic(paddr);
} else if (*bmc <= 2) { } else if (*bmc <= 2) {
*bmc = 1; /* maybe clear the bit next time */ *bmc = 1; /* maybe clear the bit next time */
set_page_attr(bitmap, page, BITMAP_PAGE_PENDING); set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);


@ -590,9 +590,9 @@ static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
int r = 0; int r = 0;
if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
src = kmap_atomic(sg_page(&dmreq->sg_in), KM_USER0); src = kmap_atomic(sg_page(&dmreq->sg_in));
r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset); r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
kunmap_atomic(src, KM_USER0); kunmap_atomic(src);
} else } else
memset(iv, 0, cc->iv_size); memset(iv, 0, cc->iv_size);
@ -608,14 +608,14 @@ static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
return 0; return 0;
dst = kmap_atomic(sg_page(&dmreq->sg_out), KM_USER0); dst = kmap_atomic(sg_page(&dmreq->sg_out));
r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset); r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);
/* Tweak the first block of plaintext sector */ /* Tweak the first block of plaintext sector */
if (!r) if (!r)
crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size); crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);
kunmap_atomic(dst, KM_USER0); kunmap_atomic(dst);
return r; return r;
} }


@ -57,9 +57,9 @@ int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info
if (dma->bouncemap[map_offset] == NULL) if (dma->bouncemap[map_offset] == NULL)
return -1; return -1;
local_irq_save(flags); local_irq_save(flags);
src = kmap_atomic(dma->map[map_offset], KM_BOUNCE_READ) + offset; src = kmap_atomic(dma->map[map_offset]) + offset;
memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len); memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
kunmap_atomic(src, KM_BOUNCE_READ); kunmap_atomic(src);
local_irq_restore(flags); local_irq_restore(flags);
sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset); sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
} }


@ -325,7 +325,7 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
p_cnt = min(p_cnt, length); p_cnt = min(p_cnt, length);
local_irq_save(flags); local_irq_save(flags);
buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + p_off; buf = kmap_atomic(pg) + p_off;
} else { } else {
buf = host->req->data + host->block_pos; buf = host->req->data + host->block_pos;
p_cnt = host->req->data_len - host->block_pos; p_cnt = host->req->data_len - host->block_pos;
@ -341,7 +341,7 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
: jmb38x_ms_read_reg_data(host, buf, p_cnt); : jmb38x_ms_read_reg_data(host, buf, p_cnt);
if (host->req->long_data) { if (host->req->long_data) {
kunmap_atomic(buf - p_off, KM_BIO_SRC_IRQ); kunmap_atomic(buf - p_off);
local_irq_restore(flags); local_irq_restore(flags);
} }
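This driver maps a page and immediately offsets into it (buf = kmap_atomic(pg) + p_off), so on the way out it subtracts the offset back off; kunmap_atomic() wants an address within the mapped page, not an arbitrary pointer. The tifm_ms hunk below repeats the same idiom. In isolation it looks like:

#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/types.h>

/* Write p_cnt bytes at offset p_off inside pg (illustrative only). */
static void write_chunk(struct page *pg, unsigned int p_off,
			const void *data, unsigned int p_cnt)
{
	u8 *buf = kmap_atomic(pg) + p_off;

	memcpy(buf, data, p_cnt);
	kunmap_atomic(buf - p_off);	/* hand back the in-page address */
}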


@ -210,7 +210,7 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
p_cnt = min(p_cnt, length); p_cnt = min(p_cnt, length);
local_irq_save(flags); local_irq_save(flags);
buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + p_off; buf = kmap_atomic(pg) + p_off;
} else { } else {
buf = host->req->data + host->block_pos; buf = host->req->data + host->block_pos;
p_cnt = host->req->data_len - host->block_pos; p_cnt = host->req->data_len - host->block_pos;
@ -221,7 +221,7 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
: tifm_ms_read_data(host, buf, p_cnt); : tifm_ms_read_data(host, buf, p_cnt);
if (host->req->long_data) { if (host->req->long_data) {
kunmap_atomic(buf - p_off, KM_BIO_SRC_IRQ); kunmap_atomic(buf - p_off);
local_irq_restore(flags); local_irq_restore(flags);
} }


@ -4101,11 +4101,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
if (length <= copybreak && if (length <= copybreak &&
skb_tailroom(skb) >= length) { skb_tailroom(skb) >= length) {
u8 *vaddr; u8 *vaddr;
vaddr = kmap_atomic(buffer_info->page, vaddr = kmap_atomic(buffer_info->page);
KM_SKB_DATA_SOFTIRQ);
memcpy(skb_tail_pointer(skb), vaddr, length); memcpy(skb_tail_pointer(skb), vaddr, length);
kunmap_atomic(vaddr, kunmap_atomic(vaddr);
KM_SKB_DATA_SOFTIRQ);
/* re-use the page, so don't erase /* re-use the page, so don't erase
* buffer_info->page */ * buffer_info->page */
skb_put(skb, length); skb_put(skb, length);


@ -1301,10 +1301,9 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
ps_page->dma, ps_page->dma,
PAGE_SIZE, PAGE_SIZE,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
vaddr = kmap_atomic(ps_page->page, vaddr = kmap_atomic(ps_page->page);
KM_SKB_DATA_SOFTIRQ);
memcpy(skb_tail_pointer(skb), vaddr, l1); memcpy(skb_tail_pointer(skb), vaddr, l1);
kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); kunmap_atomic(vaddr);
dma_sync_single_for_device(&pdev->dev, dma_sync_single_for_device(&pdev->dev,
ps_page->dma, ps_page->dma,
PAGE_SIZE, PAGE_SIZE,
@ -1503,12 +1502,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
if (length <= copybreak && if (length <= copybreak &&
skb_tailroom(skb) >= length) { skb_tailroom(skb) >= length) {
u8 *vaddr; u8 *vaddr;
vaddr = kmap_atomic(buffer_info->page, vaddr = kmap_atomic(buffer_info->page);
KM_SKB_DATA_SOFTIRQ);
memcpy(skb_tail_pointer(skb), vaddr, memcpy(skb_tail_pointer(skb), vaddr,
length); length);
kunmap_atomic(vaddr, kunmap_atomic(vaddr);
KM_SKB_DATA_SOFTIRQ);
/* re-use the page, so don't erase /* re-use the page, so don't erase
* buffer_info->page */ * buffer_info->page */
skb_put(skb, length); skb_put(skb, length);


@ -104,8 +104,8 @@
#include <asm/byteorder.h> #include <asm/byteorder.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#define cas_page_map(x) kmap_atomic((x), KM_SKB_DATA_SOFTIRQ) #define cas_page_map(x) kmap_atomic((x))
#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ) #define cas_page_unmap(x) kunmap_atomic((x))
#define CAS_NCPUS num_online_cpus() #define CAS_NCPUS num_online_cpus()
#define cas_skb_release(x) netif_rx(x) #define cas_skb_release(x) netif_rx(x)


@ -1736,7 +1736,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
(uint32_t ) cmd->cmnd[8]; (uint32_t ) cmd->cmnd[8];
/* 4 bytes: Areca io control code */ /* 4 bytes: Areca io control code */
sg = scsi_sglist(cmd); sg = scsi_sglist(cmd);
buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; buffer = kmap_atomic(sg_page(sg)) + sg->offset;
if (scsi_sg_count(cmd) > 1) { if (scsi_sg_count(cmd) > 1) {
retvalue = ARCMSR_MESSAGE_FAIL; retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out; goto message_out;
@ -1985,7 +1985,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
} }
message_out: message_out:
sg = scsi_sglist(cmd); sg = scsi_sglist(cmd);
kunmap_atomic(buffer - sg->offset, KM_IRQ0); kunmap_atomic(buffer - sg->offset);
return retvalue; return retvalue;
} }
@ -2035,11 +2035,11 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
strncpy(&inqdata[32], "R001", 4); /* Product Revision */ strncpy(&inqdata[32], "R001", 4); /* Product Revision */
sg = scsi_sglist(cmd); sg = scsi_sglist(cmd);
buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; buffer = kmap_atomic(sg_page(sg)) + sg->offset;
memcpy(buffer, inqdata, sizeof(inqdata)); memcpy(buffer, inqdata, sizeof(inqdata));
sg = scsi_sglist(cmd); sg = scsi_sglist(cmd);
kunmap_atomic(buffer - sg->offset, KM_IRQ0); kunmap_atomic(buffer - sg->offset);
cmd->scsi_done(cmd); cmd->scsi_done(cmd);
} }


@ -322,8 +322,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
return -ENOMEM; return -ENOMEM;
} }
frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ) cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
+ frag->page_offset;
} else { } else {
cp = (struct fcoe_crc_eof *)skb_put(skb, tlen); cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
} }
@ -332,7 +331,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
cp->fcoe_eof = eof; cp->fcoe_eof = eof;
cp->fcoe_crc32 = cpu_to_le32(~crc); cp->fcoe_crc32 = cpu_to_le32(~crc);
if (skb_is_nonlinear(skb)) { if (skb_is_nonlinear(skb)) {
kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ); kunmap_atomic(cp);
cp = NULL; cp = NULL;
} }


@ -1956,12 +1956,11 @@ int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
/* data fits in the skb's headroom */ /* data fits in the skb's headroom */
for (i = 0; i < tdata->nr_frags; i++, frag++) { for (i = 0; i < tdata->nr_frags; i++, frag++) {
char *src = kmap_atomic(frag->page, char *src = kmap_atomic(frag->page);
KM_SOFTIRQ0);
memcpy(dst, src+frag->offset, frag->size); memcpy(dst, src+frag->offset, frag->size);
dst += frag->size; dst += frag->size;
kunmap_atomic(src, KM_SOFTIRQ0); kunmap_atomic(src);
} }
if (padlen) { if (padlen) {
memset(dst, 0, padlen); memset(dst, 0, padlen);


@ -1515,7 +1515,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
return -ENOMEM; return -ENOMEM;
} }
frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ) cp = kmap_atomic(skb_frag_page(frag))
+ frag->page_offset; + frag->page_offset;
} else { } else {
cp = (struct fcoe_crc_eof *)skb_put(skb, tlen); cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
@ -1526,7 +1526,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
cp->fcoe_crc32 = cpu_to_le32(~crc); cp->fcoe_crc32 = cpu_to_le32(~crc);
if (skb_is_nonlinear(skb)) { if (skb_is_nonlinear(skb)) {
kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ); kunmap_atomic(cp);
cp = NULL; cp = NULL;
} }


@ -210,10 +210,9 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
while (len > 0) { while (len > 0) {
clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
data = kmap_atomic( data = kmap_atomic(
skb_frag_page(frag) + (off >> PAGE_SHIFT), skb_frag_page(frag) + (off >> PAGE_SHIFT));
KM_SKB_DATA_SOFTIRQ);
crc = crc32(crc, data + (off & ~PAGE_MASK), clen); crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ); kunmap_atomic(data);
off += clen; off += clen;
len -= clen; len -= clen;
} }


@ -2310,10 +2310,10 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
return; return;
} }
local_irq_save(flags); local_irq_save(flags);
address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset; address = kmap_atomic(sg_page(sl)) + sl->offset;
memcpy(address, buffer, cpnow); memcpy(address, buffer, cpnow);
flush_dcache_page(sg_page(sl)); flush_dcache_page(sg_page(sl));
kunmap_atomic(address, KM_BIO_SRC_IRQ); kunmap_atomic(address);
local_irq_restore(flags); local_irq_restore(flags);
if (cpsum == cpcount) if (cpsum == cpcount)
break; break;


@ -1511,14 +1511,14 @@ static int ips_is_passthru(struct scsi_cmnd *SC)
/* kmap_atomic() ensures addressability of the user buffer.*/ /* kmap_atomic() ensures addressability of the user buffer.*/
/* local_irq_save() protects the KM_IRQ0 address slot. */ /* local_irq_save() protects the KM_IRQ0 address slot. */
local_irq_save(flags); local_irq_save(flags);
buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; buffer = kmap_atomic(sg_page(sg)) + sg->offset;
if (buffer && buffer[0] == 'C' && buffer[1] == 'O' && if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
buffer[2] == 'P' && buffer[3] == 'P') { buffer[2] == 'P' && buffer[3] == 'P') {
kunmap_atomic(buffer - sg->offset, KM_IRQ0); kunmap_atomic(buffer - sg->offset);
local_irq_restore(flags); local_irq_restore(flags);
return 1; return 1;
} }
kunmap_atomic(buffer - sg->offset, KM_IRQ0); kunmap_atomic(buffer - sg->offset);
local_irq_restore(flags); local_irq_restore(flags);
} }
return 0; return 0;


@ -1304,9 +1304,9 @@ sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
struct page *page = sg_page(sg); struct page *page = sg_page(sg);
copy_len = min_t(int, total_len, sg_dma_len(sg)); copy_len = min_t(int, total_len, sg_dma_len(sg));
kaddr = kmap_atomic(page, KM_IRQ0); kaddr = kmap_atomic(page);
memcpy(kaddr + sg->offset, src_addr, copy_len); memcpy(kaddr + sg->offset, src_addr, copy_len);
kunmap_atomic(kaddr, KM_IRQ0); kunmap_atomic(kaddr);
total_len -= copy_len; total_len -= copy_len;
src_addr += copy_len; src_addr += copy_len;
sg = sg_next(sg); sg = sg_next(sg);
@ -1654,7 +1654,7 @@ sci_io_request_frame_handler(struct isci_request *ireq,
sci_unsolicited_frame_control_get_header(&ihost->uf_control, sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index, frame_index,
&frame_header); &frame_header);
kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); kaddr = kmap_atomic(sg_page(sg));
rsp = kaddr + sg->offset; rsp = kaddr + sg->offset;
sci_swab32_cpy(rsp, frame_header, 1); sci_swab32_cpy(rsp, frame_header, 1);
@ -1691,7 +1691,7 @@ sci_io_request_frame_handler(struct isci_request *ireq,
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
} }
kunmap_atomic(kaddr, KM_IRQ0); kunmap_atomic(kaddr);
sci_controller_release_frame(ihost, frame_index); sci_controller_release_frame(ihost, frame_index);
@ -3023,10 +3023,10 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE); dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
/* need to swab it back in case the command buffer is re-used */ /* need to swab it back in case the command buffer is re-used */
kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); kaddr = kmap_atomic(sg_page(sg));
smp_req = kaddr + sg->offset; smp_req = kaddr + sg->offset;
sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
kunmap_atomic(kaddr, KM_IRQ0); kunmap_atomic(kaddr);
break; break;
} }
default: default:
@ -3311,7 +3311,7 @@ sci_io_request_construct_smp(struct device *dev,
u8 req_len; u8 req_len;
u32 cmd; u32 cmd;
kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); kaddr = kmap_atomic(sg_page(sg));
smp_req = kaddr + sg->offset; smp_req = kaddr + sg->offset;
/* /*
* Look at the SMP requests' header fields; for certain SAS 1.x SMP * Look at the SMP requests' header fields; for certain SAS 1.x SMP
@ -3337,7 +3337,7 @@ sci_io_request_construct_smp(struct device *dev,
req_len = smp_req->req_len; req_len = smp_req->req_len;
sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
cmd = *(u32 *) smp_req; cmd = *(u32 *) smp_req;
kunmap_atomic(kaddr, KM_IRQ0); kunmap_atomic(kaddr);
if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
return SCI_FAILURE; return SCI_FAILURE;


@ -485,11 +485,11 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) { if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents, copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
&offset, KM_SOFTIRQ0, NULL); &offset, NULL);
} else { } else {
crc = crc32(~0, (u8 *) fh, sizeof(*fh)); crc = crc32(~0, (u8 *) fh, sizeof(*fh));
copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents, copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
&offset, KM_SOFTIRQ0, &crc); &offset, &crc);
buf = fc_frame_payload_get(fp, 0); buf = fc_frame_payload_get(fp, 0);
if (len % 4) if (len % 4)
crc = crc32(crc, buf + len, 4 - (len % 4)); crc = crc32(crc, buf + len, 4 - (len % 4));
@ -650,10 +650,10 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
* The scatterlist item may be bigger than PAGE_SIZE, * The scatterlist item may be bigger than PAGE_SIZE,
* but we must not cross pages inside the kmap. * but we must not cross pages inside the kmap.
*/ */
page_addr = kmap_atomic(page, KM_SOFTIRQ0); page_addr = kmap_atomic(page);
memcpy(data, (char *)page_addr + (off & ~PAGE_MASK), memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
sg_bytes); sg_bytes);
kunmap_atomic(page_addr, KM_SOFTIRQ0); kunmap_atomic(page_addr);
data += sg_bytes; data += sg_bytes;
} }
offset += sg_bytes; offset += sg_bytes;
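The comment about not crossing pages is the practical consequence of kmap_atomic() mapping exactly one page: when a scatterlist entry spans several pages, the copy must be chopped at every page boundary and each page mapped on its own. The arithmetic, pulled out into a sketch (not the complete fc_fcp_send_data() logic):

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/string.h>

/* Copy up to sg_bytes from byte 'off' of a multi-page sg entry into data. */
static size_t copy_from_sg_chunk(struct page *base, size_t off,
				 void *data, size_t sg_bytes)
{
	void *page_addr;

	/* Clamp to the end of the current page: one kmap, one page. */
	sg_bytes = min(sg_bytes, (size_t)(PAGE_SIZE - (off & ~PAGE_MASK)));
	page_addr = kmap_atomic(base + (off >> PAGE_SHIFT));
	memcpy(data, (char *)page_addr + (off & ~PAGE_MASK), sg_bytes);
	kunmap_atomic(page_addr);
	return sg_bytes;	/* caller advances off and data by this much */
}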


@ -105,14 +105,13 @@ module_exit(libfc_exit);
* @sg: pointer to the pointer of the SG list. * @sg: pointer to the pointer of the SG list.
* @nents: pointer to the remaining number of entries in the SG list. * @nents: pointer to the remaining number of entries in the SG list.
* @offset: pointer to the current offset in the SG list. * @offset: pointer to the current offset in the SG list.
* @km_type: dedicated page table slot type for kmap_atomic.
* @crc: pointer to the 32-bit crc value. * @crc: pointer to the 32-bit crc value.
* If crc is NULL, CRC is not calculated. * If crc is NULL, CRC is not calculated.
*/ */
u32 fc_copy_buffer_to_sglist(void *buf, size_t len, u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
struct scatterlist *sg, struct scatterlist *sg,
u32 *nents, size_t *offset, u32 *nents, size_t *offset,
enum km_type km_type, u32 *crc) u32 *crc)
{ {
size_t remaining = len; size_t remaining = len;
u32 copy_len = 0; u32 copy_len = 0;
@ -142,12 +141,11 @@ u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
off = *offset + sg->offset; off = *offset + sg->offset;
sg_bytes = min(sg_bytes, sg_bytes = min(sg_bytes,
(size_t)(PAGE_SIZE - (off & ~PAGE_MASK))); (size_t)(PAGE_SIZE - (off & ~PAGE_MASK)));
page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT), page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT));
km_type);
if (crc) if (crc)
*crc = crc32(*crc, buf, sg_bytes); *crc = crc32(*crc, buf, sg_bytes);
memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes); memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes);
kunmap_atomic(page_addr, km_type); kunmap_atomic(page_addr);
buf += sg_bytes; buf += sg_bytes;
*offset += sg_bytes; *offset += sg_bytes;
remaining -= sg_bytes; remaining -= sg_bytes;


@ -134,6 +134,6 @@ extern void fc_fc4_conf_lport_params(struct fc_lport *, enum fc_fh_type);
u32 fc_copy_buffer_to_sglist(void *buf, size_t len, u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
struct scatterlist *sg, struct scatterlist *sg,
u32 *nents, size_t *offset, u32 *nents, size_t *offset,
enum km_type km_type, u32 *crc); u32 *crc);
#endif /* _FC_LIBFC_H_ */ #endif /* _FC_LIBFC_H_ */


@ -1698,7 +1698,7 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
job->reply->reply_payload_rcv_len += job->reply->reply_payload_rcv_len +=
fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents, fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
&info->offset, KM_BIO_SRC_IRQ, NULL); &info->offset, NULL);
if (fr_eof(fp) == FC_EOF_T && if (fr_eof(fp) == FC_EOF_T &&
(ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) == (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==


@ -135,7 +135,7 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
if (recv) { if (recv) {
segment->atomic_mapped = true; segment->atomic_mapped = true;
segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); segment->sg_mapped = kmap_atomic(sg_page(sg));
} else { } else {
segment->atomic_mapped = false; segment->atomic_mapped = false;
/* the xmit path can sleep with the page mapped so use kmap */ /* the xmit path can sleep with the page mapped so use kmap */
@ -149,7 +149,7 @@ void iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
{ {
if (segment->sg_mapped) { if (segment->sg_mapped) {
if (segment->atomic_mapped) if (segment->atomic_mapped)
kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0); kunmap_atomic(segment->sg_mapped);
else else
kunmap(sg_page(segment->sg)); kunmap(sg_page(segment->sg));
segment->sg_mapped = NULL; segment->sg_mapped = NULL;
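The asymmetry in these hunks is the general selection rule: the receive side runs in softirq context and may not sleep, so it takes the atomic mapping, while the transmit side can sleep with the page mapped and must use plain kmap(). Reduced to a sketch:

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

/* kmap() may block; kmap_atomic() may not, but is usable from irq paths. */
static void *map_sg_page(struct scatterlist *sg, bool may_sleep)
{
	if (may_sleep)
		return kmap(sg_page(sg));
	return kmap_atomic(sg_page(sg));
}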


@ -246,9 +246,9 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
} }
local_irq_disable(); local_irq_disable();
buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio); buf = kmap_atomic(bio_page(req->bio));
memcpy(req_data, buf, blk_rq_bytes(req)); memcpy(req_data, buf, blk_rq_bytes(req));
kunmap_atomic(buf - bio_offset(req->bio), KM_USER0); kunmap_atomic(buf - bio_offset(req->bio));
local_irq_enable(); local_irq_enable();
if (req_data[0] != SMP_REQUEST) if (req_data[0] != SMP_REQUEST)
@ -361,10 +361,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
} }
local_irq_disable(); local_irq_disable();
buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio); buf = kmap_atomic(bio_page(rsp->bio));
memcpy(buf, resp_data, blk_rq_bytes(rsp)); memcpy(buf, resp_data, blk_rq_bytes(rsp));
flush_kernel_dcache_page(bio_page(rsp->bio)); flush_kernel_dcache_page(bio_page(rsp->bio));
kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0); kunmap_atomic(buf - bio_offset(rsp->bio));
local_irq_enable(); local_irq_enable();
out: out:


@ -670,10 +670,10 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
struct scatterlist *sg; struct scatterlist *sg;
sg = scsi_sglist(cmd); sg = scsi_sglist(cmd);
buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; buf = kmap_atomic(sg_page(sg)) + sg->offset;
memset(buf, 0, cmd->cmnd[4]); memset(buf, 0, cmd->cmnd[4]);
kunmap_atomic(buf - sg->offset, KM_IRQ0); kunmap_atomic(buf - sg->offset);
cmd->result = (DID_OK << 16); cmd->result = (DID_OK << 16);
cmd->scsi_done(cmd); cmd->scsi_done(cmd);


@ -1885,11 +1885,11 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
case SAS_PROTOCOL_SMP: { case SAS_PROTOCOL_SMP: {
struct scatterlist *sg_resp = &task->smp_task.smp_resp; struct scatterlist *sg_resp = &task->smp_task.smp_resp;
tstat->stat = SAM_STAT_GOOD; tstat->stat = SAM_STAT_GOOD;
to = kmap_atomic(sg_page(sg_resp), KM_IRQ0); to = kmap_atomic(sg_page(sg_resp));
memcpy(to + sg_resp->offset, memcpy(to + sg_resp->offset,
slot->response + sizeof(struct mvs_err_info), slot->response + sizeof(struct mvs_err_info),
sg_dma_len(sg_resp)); sg_dma_len(sg_resp));
kunmap_atomic(to, KM_IRQ0); kunmap_atomic(to);
break; break;
} }


@ -1778,7 +1778,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) { scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
int len = min(psgl->length, resid); int len = min(psgl->length, resid);
paddr = kmap_atomic(sg_page(psgl), KM_IRQ0) + psgl->offset; paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
memcpy(paddr, dif_storep + dif_offset(sector), len); memcpy(paddr, dif_storep + dif_offset(sector), len);
sector += len >> 3; sector += len >> 3;
@ -1788,7 +1788,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
sector = do_div(tmp_sec, sdebug_store_sectors); sector = do_div(tmp_sec, sdebug_store_sectors);
} }
resid -= len; resid -= len;
kunmap_atomic(paddr, KM_IRQ0); kunmap_atomic(paddr);
} }
dix_reads++; dix_reads++;
@ -1881,12 +1881,12 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
BUG_ON(scsi_sg_count(SCpnt) == 0); BUG_ON(scsi_sg_count(SCpnt) == 0);
BUG_ON(scsi_prot_sg_count(SCpnt) == 0); BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) + psgl->offset; paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
ppage_offset = 0; ppage_offset = 0;
/* For each data page */ /* For each data page */
scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) { scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
daddr = kmap_atomic(sg_page(dsgl), KM_IRQ0) + dsgl->offset; daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
/* For each sector-sized chunk in data page */ /* For each sector-sized chunk in data page */
for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) { for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
@ -1895,10 +1895,10 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
* protection page advance to the next one * protection page advance to the next one
*/ */
if (ppage_offset >= psgl->length) { if (ppage_offset >= psgl->length) {
kunmap_atomic(paddr, KM_IRQ1); kunmap_atomic(paddr);
psgl = sg_next(psgl); psgl = sg_next(psgl);
BUG_ON(psgl == NULL); BUG_ON(psgl == NULL);
paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) paddr = kmap_atomic(sg_page(psgl))
+ psgl->offset; + psgl->offset;
ppage_offset = 0; ppage_offset = 0;
} }
@ -1971,10 +1971,10 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
ppage_offset += sizeof(struct sd_dif_tuple); ppage_offset += sizeof(struct sd_dif_tuple);
} }
kunmap_atomic(daddr, KM_IRQ0); kunmap_atomic(daddr);
} }
kunmap_atomic(paddr, KM_IRQ1); kunmap_atomic(paddr);
dix_writes++; dix_writes++;
@ -1982,8 +1982,8 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
out: out:
dif_errors++; dif_errors++;
kunmap_atomic(daddr, KM_IRQ0); kunmap_atomic(daddr);
kunmap_atomic(paddr, KM_IRQ1); kunmap_atomic(paddr);
return ret; return ret;
} }
@ -2303,7 +2303,7 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
offset = 0; offset = 0;
for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) { for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0); kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
if (!kaddr) if (!kaddr)
goto out; goto out;
@ -2311,7 +2311,7 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
*(kaddr + sg->offset + j) ^= *(buf + offset + j); *(kaddr + sg->offset + j) ^= *(buf + offset + j);
offset += sg->length; offset += sg->length;
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
} }
ret = 0; ret = 0;
out: out:


@ -2567,7 +2567,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
if (*len > sg_len) if (*len > sg_len)
*len = sg_len; *len = sg_len;
return kmap_atomic(page, KM_BIO_SRC_IRQ); return kmap_atomic(page);
} }
EXPORT_SYMBOL(scsi_kmap_atomic_sg); EXPORT_SYMBOL(scsi_kmap_atomic_sg);
@ -2577,6 +2577,6 @@ EXPORT_SYMBOL(scsi_kmap_atomic_sg);
*/ */
void scsi_kunmap_atomic_sg(void *virt) void scsi_kunmap_atomic_sg(void *virt)
{ {
kunmap_atomic(virt, KM_BIO_SRC_IRQ); kunmap_atomic(virt);
} }
EXPORT_SYMBOL(scsi_kunmap_atomic_sg); EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
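scsi_kmap_atomic_sg() keeps hiding all of this from SCSI drivers: it finds the page containing *offset, maps it, and clamps *len to what that page holds, with *offset rewritten relative to the returned mapping (at least as I read the helper). A hedged usage sketch, error handling omitted:

#include <linux/kernel.h>
#include <linux/string.h>
#include <scsi/scsi_cmnd.h>

/* Peek at the first bytes of a command's data buffer (illustrative). */
static void peek_data(struct scsi_cmnd *cmd, void *out, size_t want)
{
	size_t offset = 0, len = want;
	char *buf;

	buf = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
				  &offset, &len);
	memcpy(out, buf + offset, min(want, len));
	scsi_kunmap_atomic_sg(buf);
}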


@ -392,7 +392,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
virt = bio->bi_integrity->bip_sector & 0xffffffff; virt = bio->bi_integrity->bip_sector & 0xffffffff;
bip_for_each_vec(iv, bio->bi_integrity, i) { bip_for_each_vec(iv, bio->bi_integrity, i) {
sdt = kmap_atomic(iv->bv_page, KM_USER0) sdt = kmap_atomic(iv->bv_page)
+ iv->bv_offset; + iv->bv_offset;
for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) { for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
@ -405,7 +405,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
phys++; phys++;
} }
kunmap_atomic(sdt, KM_USER0); kunmap_atomic(sdt);
} }
bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY); bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
@ -414,7 +414,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
return 0; return 0;
error: error:
kunmap_atomic(sdt, KM_USER0); kunmap_atomic(sdt);
sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u, app %4x\n", sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u, app %4x\n",
__func__, virt, phys, be32_to_cpu(sdt->ref_tag), __func__, virt, phys, be32_to_cpu(sdt->ref_tag),
be16_to_cpu(sdt->app_tag)); be16_to_cpu(sdt->app_tag));
@ -453,13 +453,13 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
virt = bio->bi_integrity->bip_sector & 0xffffffff; virt = bio->bi_integrity->bip_sector & 0xffffffff;
bip_for_each_vec(iv, bio->bi_integrity, i) { bip_for_each_vec(iv, bio->bi_integrity, i) {
sdt = kmap_atomic(iv->bv_page, KM_USER0) sdt = kmap_atomic(iv->bv_page)
+ iv->bv_offset; + iv->bv_offset;
for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) { for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
if (sectors == 0) { if (sectors == 0) {
kunmap_atomic(sdt, KM_USER0); kunmap_atomic(sdt);
return; return;
} }
@ -474,7 +474,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
sectors--; sectors--;
} }
kunmap_atomic(sdt, KM_USER0); kunmap_atomic(sdt);
} }
} }
} }


@ -481,6 +481,19 @@ cleanup:
return NULL; return NULL;
} }
/* Disgusting wrapper functions */
static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx)
{
void *addr = kmap_atomic(sg_page(sgl + idx));
return (unsigned long)addr;
}
static inline void sg_kunmap_atomic(unsigned long addr)
{
kunmap_atomic((void *)addr);
}
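These storvsc wrappers exist so the bounce-copy loops below can keep doing their address arithmetic in unsigned long; note that the unmap side is handed back the address the map side originally returned, so callers subtract any offset they added. The pattern, reduced to an illustrative helper:

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Illustrative only: zero scatterlist entry i via the wrappers above. */
static void zero_sg_entry(struct scatterlist *sgl, int i)
{
	unsigned long addr = sg_kmap_atomic(sgl, i) + sgl[i].offset;

	memset((void *)addr, 0, sgl[i].length);
	sg_kunmap_atomic(addr - sgl[i].offset);
}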
/* Assume the original sgl has enough room */ /* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
struct scatterlist *bounce_sgl, struct scatterlist *bounce_sgl,
@ -499,15 +512,12 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
local_irq_save(flags); local_irq_save(flags);
for (i = 0; i < orig_sgl_count; i++) { for (i = 0; i < orig_sgl_count; i++) {
dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])), dest_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
KM_IRQ0) + orig_sgl[i].offset;
dest = dest_addr; dest = dest_addr;
destlen = orig_sgl[i].length; destlen = orig_sgl[i].length;
if (bounce_addr == 0) if (bounce_addr == 0)
bounce_addr = bounce_addr = sg_kmap_atomic(bounce_sgl,j);
(unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
KM_IRQ0);
while (destlen) { while (destlen) {
src = bounce_addr + bounce_sgl[j].offset; src = bounce_addr + bounce_sgl[j].offset;
@ -523,7 +533,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
if (bounce_sgl[j].offset == bounce_sgl[j].length) { if (bounce_sgl[j].offset == bounce_sgl[j].length) {
/* full */ /* full */
kunmap_atomic((void *)bounce_addr, KM_IRQ0); sg_kunmap_atomic(bounce_addr);
j++; j++;
/* /*
@ -537,26 +547,21 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
/* /*
* We are done; cleanup and return. * We are done; cleanup and return.
*/ */
kunmap_atomic((void *)(dest_addr - sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
orig_sgl[i].offset),
KM_IRQ0);
local_irq_restore(flags); local_irq_restore(flags);
return total_copied; return total_copied;
} }
/* if we need to use another bounce buffer */ /* if we need to use another bounce buffer */
if (destlen || i != orig_sgl_count - 1) if (destlen || i != orig_sgl_count - 1)
bounce_addr = bounce_addr = sg_kmap_atomic(bounce_sgl,j);
(unsigned long)kmap_atomic(
sg_page((&bounce_sgl[j])), KM_IRQ0);
} else if (destlen == 0 && i == orig_sgl_count - 1) { } else if (destlen == 0 && i == orig_sgl_count - 1) {
/* unmap the last bounce that is < PAGE_SIZE */ /* unmap the last bounce that is < PAGE_SIZE */
kunmap_atomic((void *)bounce_addr, KM_IRQ0); sg_kunmap_atomic(bounce_addr);
} }
} }
kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset), sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
KM_IRQ0);
} }
local_irq_restore(flags); local_irq_restore(flags);
@ -581,15 +586,12 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
local_irq_save(flags); local_irq_save(flags);
for (i = 0; i < orig_sgl_count; i++) { for (i = 0; i < orig_sgl_count; i++) {
src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])), src_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
KM_IRQ0) + orig_sgl[i].offset;
src = src_addr; src = src_addr;
srclen = orig_sgl[i].length; srclen = orig_sgl[i].length;
if (bounce_addr == 0) if (bounce_addr == 0)
bounce_addr = bounce_addr = sg_kmap_atomic(bounce_sgl,j);
(unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
KM_IRQ0);
while (srclen) { while (srclen) {
/* assume bounce offset always == 0 */ /* assume bounce offset always == 0 */
@ -606,22 +608,20 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
if (bounce_sgl[j].length == PAGE_SIZE) { if (bounce_sgl[j].length == PAGE_SIZE) {
/* full..move to next entry */ /* full..move to next entry */
kunmap_atomic((void *)bounce_addr, KM_IRQ0); sg_kunmap_atomic(bounce_addr);
j++; j++;
/* if we need to use another bounce buffer */ /* if we need to use another bounce buffer */
if (srclen || i != orig_sgl_count - 1) if (srclen || i != orig_sgl_count - 1)
bounce_addr = bounce_addr = sg_kmap_atomic(bounce_sgl,j);
(unsigned long)kmap_atomic(
sg_page((&bounce_sgl[j])), KM_IRQ0);
} else if (srclen == 0 && i == orig_sgl_count - 1) { } else if (srclen == 0 && i == orig_sgl_count - 1) {
/* unmap the last bounce that is < PAGE_SIZE */ /* unmap the last bounce that is < PAGE_SIZE */
kunmap_atomic((void *)bounce_addr, KM_IRQ0); sg_kunmap_atomic(bounce_addr);
} }
} }
kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0); sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
} }
local_irq_restore(flags); local_irq_restore(flags);


@ -56,17 +56,17 @@ static void clear_flag(struct block_header *block, enum blockflags flag)
* This is called from xv_malloc/xv_free path, so it * This is called from xv_malloc/xv_free path, so it
* needs to be fast. * needs to be fast.
*/ */
static void *get_ptr_atomic(struct page *page, u16 offset, enum km_type type) static void *get_ptr_atomic(struct page *page, u16 offset)
{ {
unsigned char *base; unsigned char *base;
base = kmap_atomic(page, type); base = kmap_atomic(page);
return base + offset; return base + offset;
} }
static void put_ptr_atomic(void *ptr, enum km_type type) static void put_ptr_atomic(void *ptr)
{ {
kunmap_atomic(ptr, type); kunmap_atomic(ptr);
} }
static u32 get_blockprev(struct block_header *block) static u32 get_blockprev(struct block_header *block)
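get_ptr_atomic()/put_ptr_atomic() drop their enum km_type argument the same way as everything else in the series; since an xvmalloc block offset is always smaller than PAGE_SIZE, page-plus-offset never leaves the mapped page. The call sites below all follow one shape, roughly (struct xv_pool and struct block_header are the types defined earlier in xvmalloc.c):

#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Sketch of the call shape used below, under the pool lock. */
static void with_block(struct xv_pool *pool, struct page *page, u16 offset)
{
	struct block_header *block;

	spin_lock(&pool->lock);
	block = get_ptr_atomic(page, offset);	/* kmap_atomic() + offset */
	/* ... read or update the block ... */
	put_ptr_atomic(block);			/* kunmap_atomic() on that page */
	spin_unlock(&pool->lock);
}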
@ -202,10 +202,10 @@ static void insert_block(struct xv_pool *pool, struct page *page, u32 offset,
if (block->link.next_page) { if (block->link.next_page) {
nextblock = get_ptr_atomic(block->link.next_page, nextblock = get_ptr_atomic(block->link.next_page,
block->link.next_offset, KM_USER1); block->link.next_offset);
nextblock->link.prev_page = page; nextblock->link.prev_page = page;
nextblock->link.prev_offset = offset; nextblock->link.prev_offset = offset;
put_ptr_atomic(nextblock, KM_USER1); put_ptr_atomic(nextblock);
/* If there was a next page then the free bits are set. */ /* If there was a next page then the free bits are set. */
return; return;
} }
@ -225,18 +225,18 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
if (block->link.prev_page) { if (block->link.prev_page) {
tmpblock = get_ptr_atomic(block->link.prev_page, tmpblock = get_ptr_atomic(block->link.prev_page,
block->link.prev_offset, KM_USER1); block->link.prev_offset);
tmpblock->link.next_page = block->link.next_page; tmpblock->link.next_page = block->link.next_page;
tmpblock->link.next_offset = block->link.next_offset; tmpblock->link.next_offset = block->link.next_offset;
put_ptr_atomic(tmpblock, KM_USER1); put_ptr_atomic(tmpblock);
} }
if (block->link.next_page) { if (block->link.next_page) {
tmpblock = get_ptr_atomic(block->link.next_page, tmpblock = get_ptr_atomic(block->link.next_page,
block->link.next_offset, KM_USER1); block->link.next_offset);
tmpblock->link.prev_page = block->link.prev_page; tmpblock->link.prev_page = block->link.prev_page;
tmpblock->link.prev_offset = block->link.prev_offset; tmpblock->link.prev_offset = block->link.prev_offset;
put_ptr_atomic(tmpblock, KM_USER1); put_ptr_atomic(tmpblock);
} }
/* Is this block at the head of the freelist? */ /* Is this block at the head of the freelist? */
@@ -249,11 +249,10 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
 	if (pool->freelist[slindex].page) {
 		struct block_header *tmpblock;
 		tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
-				pool->freelist[slindex].offset,
-				KM_USER1);
+				pool->freelist[slindex].offset);
 		tmpblock->link.prev_page = NULL;
 		tmpblock->link.prev_offset = 0;
-		put_ptr_atomic(tmpblock, KM_USER1);
+		put_ptr_atomic(tmpblock);
 	} else {
 		/* This freelist bucket is empty */
 		__clear_bit(slindex % BITS_PER_LONG,
@@ -284,7 +283,7 @@ static int grow_pool(struct xv_pool *pool, gfp_t flags)
 	stat_inc(&pool->total_pages);
 	spin_lock(&pool->lock);
-	block = get_ptr_atomic(page, 0, KM_USER0);
+	block = get_ptr_atomic(page, 0);
 	block->size = PAGE_SIZE - XV_ALIGN;
 	set_flag(block, BLOCK_FREE);
@@ -293,7 +292,7 @@ static int grow_pool(struct xv_pool *pool, gfp_t flags)
 	insert_block(pool, page, 0, block);
-	put_ptr_atomic(block, KM_USER0);
+	put_ptr_atomic(block);
 	spin_unlock(&pool->lock);
 	return 0;
@@ -375,7 +374,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
 		return -ENOMEM;
 	}
-	block = get_ptr_atomic(*page, *offset, KM_USER0);
+	block = get_ptr_atomic(*page, *offset);
 	remove_block(pool, *page, *offset, block, index);
@@ -405,7 +404,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
 	block->size = origsize;
 	clear_flag(block, BLOCK_FREE);
-	put_ptr_atomic(block, KM_USER0);
+	put_ptr_atomic(block);
 	spin_unlock(&pool->lock);
 	*offset += XV_ALIGN;
@@ -426,7 +425,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
 	spin_lock(&pool->lock);
-	page_start = get_ptr_atomic(page, 0, KM_USER0);
+	page_start = get_ptr_atomic(page, 0);
 	block = (struct block_header *)((char *)page_start + offset);
 	/* Catch double free bugs */
@@ -468,7 +467,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
 	/* No used objects in this page. Free it. */
 	if (block->size == PAGE_SIZE - XV_ALIGN) {
-		put_ptr_atomic(page_start, KM_USER0);
+		put_ptr_atomic(page_start);
 		spin_unlock(&pool->lock);
 		__free_page(page);
@@ -486,7 +485,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
 		set_blockprev(tmpblock, offset);
 	}
-	put_ptr_atomic(page_start, KM_USER0);
+	put_ptr_atomic(page_start);
 	spin_unlock(&pool->lock);
 }
 EXPORT_SYMBOL_GPL(xv_free);
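
Every hunk above follows the same mechanical pattern: the km_type slot argument (KM_USER0, KM_USER1, ...) is dropped from both the map and the unmap call, and nothing else about the calling sequence changes. A minimal before/after sketch, using a hypothetical helper rather than code from this series:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper, for illustration only: the slot argument is
 * simply deleted on both sides of the critical section. */
static void copy_into_page(struct page *page, const void *buf, size_t len)
{
	/* old: void *dst = kmap_atomic(page, KM_USER0); */
	void *dst = kmap_atomic(page);

	memcpy(dst, buf, len);

	/* old: kunmap_atomic(dst, KM_USER0); */
	kunmap_atomic(dst);
}

The get_ptr_atomic()/put_ptr_atomic() wrappers in the xvmalloc hunks are presumably thin layers over the same two calls; their definitions are not part of the hunks shown here.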


@@ -496,13 +496,13 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
 	}
 	ASSERT_SENTINEL(zh, ZBH);
 	BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
-	to_va = kmap_atomic(page, KM_USER0);
+	to_va = kmap_atomic(page);
 	size = zh->size;
 	from_va = zbud_data(zh, size);
 	ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
 	BUG_ON(ret != LZO_E_OK);
 	BUG_ON(out_len != PAGE_SIZE);
-	kunmap_atomic(to_va, KM_USER0);
+	kunmap_atomic(to_va);
 out:
 	spin_unlock(&zbpg->lock);
 	return ret;
@@ -1109,7 +1109,7 @@ static struct zv_hdr *zv_create(struct zcache_client *cli, uint32_t pool_id,
 		goto out;
 	atomic_inc(&zv_curr_dist_counts[chunks]);
 	atomic_inc(&zv_cumul_dist_counts[chunks]);
-	zv = kmap_atomic(page, KM_USER0) + offset;
+	zv = kmap_atomic(page) + offset;
 	zv->index = index;
 	zv->oid = *oid;
 	zv->pool_id = pool_id;
@@ -1123,7 +1123,7 @@ static struct zv_hdr *zv_create(struct zcache_client *cli, uint32_t pool_id,
 		spin_unlock(&zcache_rem_op_list_lock);
 	}
 	memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
-	kunmap_atomic(zv, KM_USER0);
+	kunmap_atomic(zv);
 out:
 	return zv;
 }
@@ -1145,7 +1145,7 @@ static struct zv_hdr *zv_alloc(struct tmem_pool *pool,
 			&page, &offset, ZCACHE_GFP_MASK);
 	if (unlikely(ret))
 		goto out;
-	zv = kmap_atomic(page, KM_USER0) + offset;
+	zv = kmap_atomic(page) + offset;
 	SET_SENTINEL(zv, ZVH);
 	INIT_LIST_HEAD(&zv->rem_op.list);
 	zv->client_id = LOCAL_CLIENT;
@@ -1153,7 +1153,7 @@ static struct zv_hdr *zv_alloc(struct tmem_pool *pool,
 	zv->index = index;
 	zv->oid = *oid;
 	zv->pool_id = pool->pool_id;
-	kunmap_atomic(zv, KM_USER0);
+	kunmap_atomic(zv);
 out:
 	return zv;
 }
@@ -1194,10 +1194,10 @@ static void zv_decompress(struct page *page, struct zv_hdr *zv)
 	ASSERT_SENTINEL(zv, ZVH);
 	size = xv_get_object_size(zv) - sizeof(*zv);
 	BUG_ON(size == 0);
-	to_va = kmap_atomic(page, KM_USER0);
+	to_va = kmap_atomic(page);
 	ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
 					size, to_va, &clen);
-	kunmap_atomic(to_va, KM_USER0);
+	kunmap_atomic(to_va);
 	BUG_ON(ret != LZO_E_OK);
 	BUG_ON(clen != PAGE_SIZE);
 }
@@ -2203,12 +2203,12 @@ static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
 	BUG_ON(!irqs_disabled());
 	if (unlikely(dmem == NULL || wmem == NULL))
 		goto out;	/* no buffer, so can't compress */
-	from_va = kmap_atomic(from, KM_USER0);
+	from_va = kmap_atomic(from);
 	mb();
 	ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
 	BUG_ON(ret != LZO_E_OK);
 	*out_va = dmem;
-	kunmap_atomic(from_va, KM_USER0);
+	kunmap_atomic(from_va);
 	ret = 1;
 out:
 	return ret;
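
Note that zcache_compress() above maps a page with interrupts already disabled (see the BUG_ON(!irqs_disabled()) context line). That remains legal after the conversion: kmap_atomic() now takes its slot from a per-CPU stack instead of a caller-declared km_type, so the same call is correct in process, softirq, and hardirq context. A sketch of the pattern those hunks reduce to; apart from the kmap calls and lzo1x_1_compress(), the names here are invented:

#include <linux/highmem.h>
#include <linux/lzo.h>

/* Sketch: compress one page into a preallocated buffer. dmem/wmem
 * stand in for the per-CPU buffers zcache sets up elsewhere; this is
 * not the literal zcache function. */
static int compress_one_page(struct page *from, void *dmem, void *wmem,
			     size_t *out_len)
{
	unsigned char *from_va;
	int ret;

	from_va = kmap_atomic(from);	/* fine even with IRQs off */
	ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
	kunmap_atomic(from_va);
	return ret;
}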


@@ -71,8 +71,8 @@ static int crypt(struct crypto_tfm *tfm,
 		u8 *src_p, *dst_p;
 		int in_place;
-		scatterwalk_map(&walk_in, 0);
-		scatterwalk_map(&walk_out, 1);
+		scatterwalk_map(&walk_in);
+		scatterwalk_map(&walk_out);
 		src_p = scatterwalk_whichbuf(&walk_in, bsize, tmp_src);
 		dst_p = scatterwalk_whichbuf(&walk_out, bsize, tmp_dst);
 		in_place = scatterwalk_samebuf(&walk_in, &walk_out,
@@ -84,10 +84,10 @@ static int crypt(struct crypto_tfm *tfm,
 		prfn(tfm, dst_p, src_p, crfn, enc, info, in_place);
-		scatterwalk_done(&walk_in, 0, nbytes);
+		scatterwalk_done(&walk_in, nbytes);
 		scatterwalk_copychunks(dst_p, &walk_out, bsize, 1);
-		scatterwalk_done(&walk_out, 1, nbytes);
+		scatterwalk_done(&walk_out, nbytes);
 		if (!nbytes)
 			return 0;


@@ -39,12 +39,12 @@ static void update(struct crypto_tfm *tfm,
 			unsigned int bytes_from_page = min(l, ((unsigned int)
 							   (PAGE_SIZE)) -
 							   offset);
-			char *p = crypto_kmap(pg, 0) + offset;
+			char *p = kmap_atomic(pg) + offset;
 			tfm->__crt_alg->cra_digest.dia_update
 					(crypto_tfm_ctx(tfm), p,
 					bytes_from_page);
-			crypto_kunmap(p, 0);
+			kunmap_atomic(p);
 			crypto_yield(tfm);
 			offset = 0;
 			pg++;
@@ -75,10 +75,10 @@ static void digest(struct crypto_tfm *tfm,
 	tfm->crt_digest.dit_init(tfm);
 	for (i = 0; i < nsg; i++) {
-		char *p = crypto_kmap(sg[i].page, 0) + sg[i].offset;
+		char *p = kmap_atomic(sg[i].page) + sg[i].offset;
 		tfm->__crt_alg->cra_digest.dia_update(crypto_tfm_ctx(tfm),
 						      p, sg[i].length);
-		crypto_kunmap(p, 0);
+		kunmap_atomic(p);
 		crypto_yield(tfm);
 	}
 	crypto_digest_final(tfm, out);


@@ -23,23 +23,6 @@
 #include <asm/kmap_types.h>
 
-extern enum km_type crypto_km_types[];
-
-static inline enum km_type crypto_kmap_type(int out)
-{
-	return crypto_km_types[(in_softirq() ? 2 : 0) + out];
-}
-
-static inline void *crypto_kmap(struct page *page, int out)
-{
-	return kmap_atomic(page, crypto_kmap_type(out));
-}
-
-static inline void crypto_kunmap(void *vaddr, int out)
-{
-	kunmap_atomic(vaddr, crypto_kmap_type(out));
-}
-
 static inline void crypto_yield(struct crypto_tfm *tfm)
 {
 	if (!in_softirq())


@@ -1,20 +0,0 @@
-#ifndef __KMAP_TYPES_H
-#define __KMAP_TYPES_H
-
-enum km_type {
-	KM_BOUNCE_READ,
-	KM_SKB_SUNRPC_DATA,
-	KM_SKB_DATA_SOFTIRQ,
-	KM_USER0,
-	KM_USER1,
-	KM_BH_IRQ,
-	KM_SOFTIRQ0,
-	KM_SOFTIRQ1,
-	KM_TYPE_NR
-};
-
-#define _ASM_KMAP_TYPES_H
-
-#endif
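
Deleting this enum is the point of the whole series: once atomic kmaps became stack-based, the mapping slot is implicit, and fixed per-context slots like KM_USER0 or KM_SOFTIRQ0 serve no purpose. The one rule that survives is that nested mappings must be released in reverse (LIFO) order. A minimal sketch, not taken from this patch:

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: two simultaneous atomic mappings without slot
 * names; unmap order is the reverse of map order. */
static void copy_page_atomic(struct page *dst, struct page *src)
{
	char *s = kmap_atomic(src);
	char *d = kmap_atomic(dst);	/* nesting needs no KM_USER1 */

	memcpy(d, s, PAGE_SIZE);

	kunmap_atomic(d);		/* LIFO: unmap d before s */
	kunmap_atomic(s);
}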


@@ -13,8 +13,6 @@
  * any later version.
  *
  */
-#include "kmap_types.h"
-
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
@@ -23,13 +21,6 @@
 #include "internal.h"
 #include "scatterwalk.h"
 
-enum km_type crypto_km_types[] = {
-	KM_USER0,
-	KM_USER1,
-	KM_SOFTIRQ0,
-	KM_SOFTIRQ1,
-};
-
 void *scatterwalk_whichbuf(struct scatter_walk *walk, unsigned int nbytes, void *scratch)
 {
 	if (nbytes <= walk->len_this_page &&
@@ -62,9 +53,9 @@ void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
 	walk->offset = sg->offset;
 }
 
-void scatterwalk_map(struct scatter_walk *walk, int out)
+void scatterwalk_map(struct scatter_walk *walk)
 {
-	walk->data = crypto_kmap(walk->page, out) + walk->offset;
+	walk->data = kmap_atomic(walk->page) + walk->offset;
 }
 
 static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
@@ -103,7 +94,7 @@ void scatterwalk_done(struct scatter_walk *walk, int out, int more)
  * has been verified as multiple of the block size.
 */
 int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
-			   size_t nbytes, int out)
+			   size_t nbytes)
 {
 	if (buf != walk->data) {
 		while (nbytes > walk->len_this_page) {
@@ -111,9 +102,9 @@ int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
 			buf += walk->len_this_page;
 			nbytes -= walk->len_this_page;
 
-			crypto_kunmap(walk->data, out);
+			kunmap_atomic(walk->data);
 			scatterwalk_pagedone(walk, out, 1);
-			scatterwalk_map(walk, out);
+			scatterwalk_map(walk);
 		}
 
 		memcpy_dir(buf, walk->data, nbytes, out);


@@ -455,14 +455,14 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
 	}
 	ASSERT_SENTINEL(zh, ZBH);
 	BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
-	to_va = kmap_atomic(page, KM_USER0);
+	to_va = kmap_atomic(page);
 	size = zh->size;
 	from_va = zbud_data(zh, size);
 	ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
 				to_va, &out_len);
 	BUG_ON(ret);
 	BUG_ON(out_len != PAGE_SIZE);
-	kunmap_atomic(to_va, KM_USER0);
+	kunmap_atomic(to_va);
 out:
 	spin_unlock(&zbpg->lock);
 	return ret;
@@ -753,10 +753,10 @@ static void zv_decompress(struct page *page, void *handle)
 	zv = zs_map_object(zcache_host.zspool, handle);
 	BUG_ON(zv->size == 0);
 	ASSERT_SENTINEL(zv, ZVH);
-	to_va = kmap_atomic(page, KM_USER0);
+	to_va = kmap_atomic(page);
 	ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv),
 				zv->size, to_va, &clen);
-	kunmap_atomic(to_va, KM_USER0);
+	kunmap_atomic(to_va);
 	zs_unmap_object(zcache_host.zspool, handle);
 	BUG_ON(ret);
 	BUG_ON(clen != PAGE_SIZE);
@@ -1334,13 +1334,13 @@ static int zcache_compress(struct page *from, void **out_va, unsigned *out_len)
 	if (unlikely(dmem == NULL))
 		goto out;	/* no buffer or no compressor so can't compress */
 	*out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
-	from_va = kmap_atomic(from, KM_USER0);
+	from_va = kmap_atomic(from);
 	mb();
 	ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
 				out_len);
 	BUG_ON(ret);
 	*out_va = dmem;
-	kunmap_atomic(from_va, KM_USER0);
+	kunmap_atomic(from_va);
 	ret = 1;
 out:
 	return ret;


@@ -175,9 +175,9 @@ static void handle_zero_page(struct bio_vec *bvec)
 	struct page *page = bvec->bv_page;
 	void *user_mem;
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(user_mem);
 	flush_dcache_page(page);
 }
@@ -188,12 +188,12 @@ static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
 	struct page *page = bvec->bv_page;
 	unsigned char *user_mem, *cmem;
-	user_mem = kmap_atomic(page, KM_USER0);
-	cmem = kmap_atomic(zram->table[index].handle, KM_USER1);
+	user_mem = kmap_atomic(page);
+	cmem = kmap_atomic(zram->table[index].handle);
 	memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
-	kunmap_atomic(cmem, KM_USER1);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(cmem);
+	kunmap_atomic(user_mem);
 	flush_dcache_page(page);
 }
@@ -242,7 +242,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		}
 	}
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 	if (!is_partial_io(bvec))
 		uncmem = user_mem;
 	clen = PAGE_SIZE;
@@ -260,7 +260,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 	}
 	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(user_mem);
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -292,7 +292,7 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
 	/* Page is stored uncompressed since it's incompressible */
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
 		memcpy(mem, cmem, PAGE_SIZE);
-		kunmap_atomic(cmem, KM_USER0);
+		kunmap_atomic(cmem);
 		return 0;
 	}
@@ -351,7 +351,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	    zram_test_flag(zram, index, ZRAM_ZERO))
 		zram_free_page(zram, index);
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 	if (is_partial_io(bvec))
 		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
@@ -360,7 +360,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		uncmem = user_mem;
 	if (page_zero_filled(uncmem)) {
-		kunmap_atomic(user_mem, KM_USER0);
+		kunmap_atomic(user_mem);
 		if (is_partial_io(bvec))
 			kfree(uncmem);
 		zram_stat_inc(&zram->stats.pages_zero);
@@ -372,7 +372,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
 			       zram->compress_workmem);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(user_mem);
 	if (is_partial_io(bvec))
 		kfree(uncmem);
@@ -400,8 +400,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
 		zram_stat_inc(&zram->stats.pages_expand);
 		handle = page_store;
-		src = kmap_atomic(page, KM_USER0);
-		cmem = kmap_atomic(page_store, KM_USER1);
+		src = kmap_atomic(page);
+		cmem = kmap_atomic(page_store);
 		goto memstore;
 	}
@@ -427,8 +427,8 @@ memstore:
 	memcpy(cmem, src, clen);
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
-		kunmap_atomic(cmem, KM_USER1);
-		kunmap_atomic(src, KM_USER0);
+		kunmap_atomic(cmem);
+		kunmap_atomic(src);
 	} else {
 		zs_unmap_object(zram->mem_pool, handle);
 	}
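
Note that the zram hunks keep every flush_dcache_page() call: the one-argument kmap_atomic() changes how a slot is picked, not the cache-coherency rules, so writes made through the kernel mapping still need flushing before userspace reads the page on aliasing (e.g. VIVT) caches. A sketch of the zero-fill idiom from handle_zero_page() above, with a hypothetical signature:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper mirroring handle_zero_page(). */
static void zero_page_range(struct page *page, unsigned int off,
			    unsigned int len)
{
	void *va = kmap_atomic(page);

	memset(va + off, 0, len);
	kunmap_atomic(va);
	flush_dcache_page(page);	/* keep user-visible view coherent */
}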


@@ -2344,7 +2344,7 @@ static void transport_xor_callback(struct se_cmd *cmd)
 	offset = 0;
 	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
-		addr = kmap_atomic(sg_page(sg), KM_USER0);
+		addr = kmap_atomic(sg_page(sg));
 		if (!addr)
 			goto out;
@@ -2352,7 +2352,7 @@ static void transport_xor_callback(struct se_cmd *cmd)
 			*(addr + sg->offset + i) ^= *(buf + offset + i);
 		offset += sg->length;
-		kunmap_atomic(addr, KM_USER0);
+		kunmap_atomic(addr);
 	}
 out:


@@ -146,14 +146,13 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 				PAGE_SIZE << compound_order(page);
 		} else {
 			BUG_ON(!page);
-			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
-					   KM_SOFTIRQ0);
+			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
 			page_addr = from;
 			from += mem_off & ~PAGE_MASK;
 			tlen = min(tlen, (size_t)(PAGE_SIZE -
						(mem_off & ~PAGE_MASK)));
 			memcpy(to, from, tlen);
-			kunmap_atomic(page_addr, KM_SOFTIRQ0);
+			kunmap_atomic(page_addr);
 			to += tlen;
 		}
@@ -291,14 +290,13 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 		tlen = min(mem_len, frame_len);
-		to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
-				 KM_SOFTIRQ0);
+		to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
 		page_addr = to;
 		to += mem_off & ~PAGE_MASK;
 		tlen = min(tlen, (size_t)(PAGE_SIZE -
					(mem_off & ~PAGE_MASK)));
 		memcpy(to, from, tlen);
-		kunmap_atomic(page_addr, KM_SOFTIRQ0);
+		kunmap_atomic(page_addr);
 		from += tlen;
 		frame_len -= tlen;
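
Also visible above: kmap_atomic() maps exactly one page, so the tcm_fc code first indexes to the page that contains mem_off before mapping, then offsets within it. A reduced sketch of that addressing (caller is assumed to have clamped tlen to the page boundary, as the hunks do with min()):

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustration, not the driver function: pick the right page of a
 * (possibly compound) page run, map it, and copy from the offset. */
static void copy_from_page_run(struct page *page, size_t mem_off,
			       void *to, size_t tlen)
{
	struct page *p = page + (mem_off >> PAGE_SHIFT);
	char *va = kmap_atomic(p);

	/* mem_off & ~PAGE_MASK is the offset within the mapped page */
	memcpy(to, va + (mem_off & ~PAGE_MASK), tlen);
	kunmap_atomic(va);
}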


@@ -937,9 +937,9 @@ static int set_bit_to_user(int nr, void __user *addr)
 	if (r < 0)
 		return r;
 	BUG_ON(r != 1);
-	base = kmap_atomic(page, KM_USER0);
+	base = kmap_atomic(page);
 	set_bit(bit, base);
-	kunmap_atomic(base, KM_USER0);
+	kunmap_atomic(base);
 	set_page_dirty_lock(page);
 	put_page(page);
 	return 0;
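
One detail the vhost hunk makes clear: kunmap_atomic() takes the address returned by kmap_atomic(), not the struct page, which is why the code unmaps base rather than page. A sketch of the pinned-user-page update idiom (hypothetical helper; assumes the page was pinned with get_user_pages()):

#include <linux/highmem.h>
#include <linux/bitops.h>
#include <linux/mm.h>

/* Illustrative helper: set a bit in a pinned user page. */
static void set_bit_in_pinned_page(struct page *page, unsigned int bit)
{
	void *base = kmap_atomic(page);

	set_bit(bit, base);
	kunmap_atomic(base);		/* pass the vaddr, not the page */
	set_page_dirty_lock(page);	/* we wrote to a pinned page */
	put_page(page);
}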

Some files were not shown because too many files have changed in this diff.