x86-64, copy_user: Remove zero byte check before copy user buffer.
The rep movsb instruction handles a zero-byte copy by itself: with a zero count in %ecx it copies nothing and falls through. As pointed out by Linus, there is no need to check for a zero size in the kernel, and removing this redundant check saves a few cycles in the copy user functions.

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Link: http://lkml.kernel.org/r/1384634221-6006-1-git-send-email-fenghua.yu@intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
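A minimal user-space sketch (an illustration, not the kernel code) of the behavior the patch relies on: when the count register is zero, rep movsb executes zero iterations, so no guard branch is needed in front of it. The helper name copy_rep_movsb is hypothetical.

#include <stdio.h>

/*
 * Illustration only, not the kernel code: "rep movsb" with a zero count
 * in %rcx executes zero iterations and falls through, which is why the
 * "andl %edx,%edx; jz" guard in the copy_user routines was redundant.
 */
static void copy_rep_movsb(void *dst, const void *src, unsigned long len)
{
	asm volatile("rep movsb"
		     : "+D" (dst), "+S" (src), "+c" (len)
		     :
		     : "memory");
}

int main(void)
{
	char src[8] = "abcdefg";
	char dst[8] = "XXXXXXX";

	copy_rep_movsb(dst, src, 0);	/* zero-byte copy: dst is untouched */
	printf("%s\n", dst);		/* prints XXXXXXX */

	copy_rep_movsb(dst, src, sizeof(src));
	printf("%s\n", dst);		/* prints abcdefg */

	return 0;
}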
parent 1213959d4a
commit f4cb1cc18f
1 changed file with 2 additions and 6 deletions
arch/x86/lib/copy_user_64.S

@@ -236,8 +236,6 @@ ENDPROC(copy_user_generic_unrolled)
 ENTRY(copy_user_generic_string)
 	CFI_STARTPROC
 	ASM_STAC
-	andl %edx,%edx
-	jz 4f
 	cmpl $8,%edx
 	jb 2f	/* less than 8 bytes, go to byte copy loop */
 	ALIGN_DESTINATION
@@ -249,7 +247,7 @@ ENTRY(copy_user_generic_string)
 2:	movl %edx,%ecx
 3:	rep
 	movsb
-4:	xorl %eax,%eax
+	xorl %eax,%eax
 	ASM_CLAC
 	ret
 
@@ -279,12 +277,10 @@ ENDPROC(copy_user_generic_string)
 ENTRY(copy_user_enhanced_fast_string)
 	CFI_STARTPROC
 	ASM_STAC
-	andl %edx,%edx
-	jz 2f
 	movl %edx,%ecx
 1:	rep
 	movsb
-2:	xorl %eax,%eax
+	xorl %eax,%eax
 	ASM_CLAC
 	ret
 
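For context, copy_user_enhanced_fast_string is used only on CPUs that advertise Enhanced REP MOVSB/STOSB (ERMS), reported in CPUID leaf 7, subleaf 0, EBX bit 9. The user-space check below is my example, not part of this commit; it assumes the __get_cpuid_count helper from GCC/Clang's cpuid.h.

#include <cpuid.h>
#include <stdio.h>

/*
 * Illustration only: the kernel patches in copy_user_enhanced_fast_string
 * when the CPU advertises Enhanced REP MOVSB/STOSB (ERMS), reported in
 * CPUID.(EAX=7,ECX=0):EBX bit 9.
 */
int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) &&
	    (ebx & (1u << 9)))
		puts("ERMS supported: enhanced rep movsb is the fast path");
	else
		puts("ERMS not supported");

	return 0;
}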