x86/fpu: Use 'struct fpu' in fpu__unlazy_stopped()
Migrate this function to pure 'struct fpu' usage.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit cc08d54599
parent db2b1d3ad1
1 changed file with 17 additions and 12 deletions
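For readers skimming the hunks below: the essence of the change is that fpu__unlazy_stopped() no longer takes a task_struct and derives the FPU state internally; each caller now resolves &target->thread.fpu up front and passes the 'struct fpu' pointer directly. The following is a minimal stand-alone C sketch of that calling convention, with stubbed-out types and a hypothetical caller (do_regset_get) invented purely for illustration; it is not the kernel code itself.

/* Stubbed-out stand-ins for the kernel types touched by this commit. */
struct fpu {
        int fpstate_active;
        int last_cpu;
};

struct thread_struct {
        struct fpu fpu;
};

struct task_struct {
        struct thread_struct thread;
};

/*
 * After the change, the helper works purely on 'struct fpu' and never
 * dereferences the owning task_struct (sketch of the new shape only;
 * allocation/initialization of the state is elided).
 */
static int fpu__unlazy_stopped(struct fpu *child_fpu)
{
        if (child_fpu->fpstate_active) {
                child_fpu->last_cpu = -1;
                return 0;
        }
        /* ... allocate and initialize the FPU state here ... */
        child_fpu->fpstate_active = 1;
        return 0;
}

/* Hypothetical caller: resolve the FPU pointer once, then pass it down. */
static int do_regset_get(struct task_struct *target)
{
        struct fpu *fpu = &target->thread.fpu;

        return fpu__unlazy_stopped(fpu);
}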
@@ -311,27 +311,26 @@ EXPORT_SYMBOL_GPL(fpstate_alloc_init);
  * the read-only case, it's not strictly necessary for
  * read-only access to the context.
  */
-static int fpu__unlazy_stopped(struct task_struct *child)
+static int fpu__unlazy_stopped(struct fpu *child_fpu)
 {
-        struct fpu *child_fpu = &child->thread.fpu;
         int ret;

-        if (WARN_ON_ONCE(child == current))
+        if (WARN_ON_ONCE(child_fpu == &current->thread.fpu))
                 return -EINVAL;

         if (child_fpu->fpstate_active) {
-                child->thread.fpu.last_cpu = -1;
+                child_fpu->last_cpu = -1;
                 return 0;
         }

         /*
          * Memory allocation at the first usage of the FPU and other state.
          */
-        ret = fpstate_alloc(&child->thread.fpu);
+        ret = fpstate_alloc(child_fpu);
         if (ret)
                 return ret;

-        fpstate_init(&child->thread.fpu);
+        fpstate_init(child_fpu);

         /* Safe to do for stopped child tasks: */
         child_fpu->fpstate_active = 1;

@@ -426,12 +425,13 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
                 unsigned int pos, unsigned int count,
                 void *kbuf, void __user *ubuf)
 {
+        struct fpu *fpu = &target->thread.fpu;
         int ret;

         if (!cpu_has_fxsr)
                 return -ENODEV;

-        ret = fpu__unlazy_stopped(target);
+        ret = fpu__unlazy_stopped(fpu);
         if (ret)
                 return ret;

@@ -445,12 +445,13 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
                 unsigned int pos, unsigned int count,
                 const void *kbuf, const void __user *ubuf)
 {
+        struct fpu *fpu = &target->thread.fpu;
         int ret;

         if (!cpu_has_fxsr)
                 return -ENODEV;

-        ret = fpu__unlazy_stopped(target);
+        ret = fpu__unlazy_stopped(fpu);
         if (ret)
                 return ret;

@@ -478,13 +479,14 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
                 unsigned int pos, unsigned int count,
                 void *kbuf, void __user *ubuf)
 {
+        struct fpu *fpu = &target->thread.fpu;
         struct xsave_struct *xsave;
         int ret;

         if (!cpu_has_xsave)
                 return -ENODEV;

-        ret = fpu__unlazy_stopped(target);
+        ret = fpu__unlazy_stopped(fpu);
         if (ret)
                 return ret;

@@ -508,13 +510,14 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
                 unsigned int pos, unsigned int count,
                 const void *kbuf, const void __user *ubuf)
 {
+        struct fpu *fpu = &target->thread.fpu;
         struct xsave_struct *xsave;
         int ret;

         if (!cpu_has_xsave)
                 return -ENODEV;

-        ret = fpu__unlazy_stopped(target);
+        ret = fpu__unlazy_stopped(fpu);
         if (ret)
                 return ret;

@@ -674,10 +677,11 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                void *kbuf, void __user *ubuf)
 {
+        struct fpu *fpu = &target->thread.fpu;
         struct user_i387_ia32_struct env;
         int ret;

-        ret = fpu__unlazy_stopped(target);
+        ret = fpu__unlazy_stopped(fpu);
         if (ret)
                 return ret;

@@ -705,10 +709,11 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                const void *kbuf, const void __user *ubuf)
 {
+        struct fpu *fpu = &target->thread.fpu;
         struct user_i387_ia32_struct env;
         int ret;

-        ret = fpu__unlazy_stopped(target);
+        ret = fpu__unlazy_stopped(fpu);
         if (ret)
                 return ret;
