BACKPORT: arm64: move sp_el0 and tpidr_el1 into cpu_suspend_ctx

When returning from idle, we rely on the fact that thread_info lives at
the end of the kernel stack, and restore this by masking the saved stack
pointer. Subsequent patches will sever the relationship between the
stack and thread_info, and to cater for this we must save/restore sp_el0
explicitly, storing it in cpu_suspend_ctx.
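
For illustration, a minimal userspace sketch of the masking trick being
removed (assuming 16K, THREAD_SIZE-aligned kernel stacks with thread_info
at the base; the names and sizes here are illustrative, not taken from the
kernel sources):

	#include <stdint.h>
	#include <stdio.h>

	#define THREAD_SIZE 0x4000UL			/* assumed 16K stack */

	struct thread_info { unsigned long cpu; };	/* illustrative only */

	/*
	 * Because the stack allocation is THREAD_SIZE-aligned and
	 * thread_info sits at its base, masking the low bits of any
	 * in-stack address recovers it. This is what cpu_resume relied
	 * on before sp_el0 was saved explicitly.
	 */
	static struct thread_info *ti_from_sp(uintptr_t sp)
	{
		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
	}

	int main(void)
	{
		static _Alignas(0x4000) unsigned char stack[THREAD_SIZE];
		uintptr_t sp = (uintptr_t)&stack[THREAD_SIZE - 64];

		printf("thread_info at %p, recovered %p\n",
		       (void *)stack, (void *)ti_from_sp(sp));
		return 0;
	}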

As cpu_suspend_ctx must be doubleword aligned, this leaves us with an
extra slot in cpu_suspend_ctx. We can use this to save/restore tpidr_el1
in the same way, which simplifies the code, avoiding pointer chasing on
the restore path (as we no longer need to load thread_info::cpu followed
by the relevant slot in __per_cpu_offset based on this).
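
As a rough sketch of the pointer chase this removes (the field and array
names mirror the kernel's for readability, but the code below is an
illustrative userspace model, not kernel source):

	#include <stdio.h>

	#define NR_CPUS 4

	/* illustrative stand-ins for the structures involved */
	struct thread_info { unsigned int cpu; };
	static unsigned long __per_cpu_offset[NR_CPUS] = {
		0x1000, 0x2000, 0x3000, 0x4000
	};

	/* old resume path: two dependent loads before tpidr_el1 is rewritten */
	static unsigned long offset_via_thread_info(const struct thread_info *ti)
	{
		unsigned int cpu = ti->cpu;	/* load thread_info::cpu */
		return __per_cpu_offset[cpu];	/* then index __per_cpu_offset */
	}

	/* new path: the saved tpidr_el1 value comes straight from cpu_suspend_ctx */
	static unsigned long offset_from_ctx(const unsigned long *saved_tpidr_el1)
	{
		return *saved_tpidr_el1;
	}

	int main(void)
	{
		struct thread_info ti = { .cpu = 2 };
		unsigned long saved = 0x3000;	/* value stashed at suspend time */

		printf("old: %#lx  new: %#lx\n",
		       offset_via_thread_info(&ti), offset_from_ctx(&saved));
		return 0;
	}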

This patch stashes both registers in cpu_suspend_ctx.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Cc: James Morse <james.morse@arm.com>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

This is a modification of Mark Rutland's original patch. The differences
from the original patch are as follows:
	- NR_CTX_REGS is set to 13 instead of 12
	- x13 and x14 are used as temporary registers to hold sp_el0 and
	  tpidr_el1 instead of x11 and x12.
	- The values are temporarily stashed at offsets 88 and 96 of
	  cpu_suspend_ctx instead of 80 and 88 (see the layout sketch
	  below).

The original patch would not apply cleanly and these changes were made
to resolve this.
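
A minimal layout sketch, assuming the usual shape of cpu_suspend_ctx (an
array of NR_CTX_REGS 64-bit slots followed by the saved stack pointer,
16-byte aligned); the struct definition here is an assumption for
illustration, and the printed offsets match the 88/96 mentioned above
(tpidr_el1 lands in the upper half of the stp at [x0, #80], sp_el0 in the
str at [x0, #96] in the proc.S hunks below):

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	#define NR_CTX_REGS 13

	/* assumed post-patch layout: slots [11] and [12] hold tpidr_el1 and sp_el0 */
	struct cpu_suspend_ctx {
		uint64_t ctx_regs[NR_CTX_REGS];
		uint64_t sp;
	} __attribute__((aligned(16)));

	int main(void)
	{
		printf("tpidr_el1 slot offset: %zu\n",
		       offsetof(struct cpu_suspend_ctx, ctx_regs[11]));	/* 88 */
		printf("sp_el0 slot offset:    %zu\n",
		       offsetof(struct cpu_suspend_ctx, ctx_regs[12]));	/* 96 */
		printf("sizeof(struct cpu_suspend_ctx) = %zu\n",
		       sizeof(struct cpu_suspend_ctx));	/* 112, a 16-byte multiple */
		return 0;
	}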

Bug: 38331309
Change-Id: I4e72aebd51e99d3767487383c14a1ba784312bf1
(cherry picked from commit 623b476fc815464a0241ea7483da7b3580b7d8ac)
Signed-off-by: Zubin Mithra <zsm@google.com>
Author:    Mark Rutland <mark.rutland@arm.com>
Date:      2016-11-03 20:23:09 +00:00
Committer: Zubin Mithra <zsm@google.com>
Parent:    0f9f933796
Commit:    48dd80cb13
4 changed files with 9 additions and 12 deletions

--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -1,7 +1,7 @@
 #ifndef __ASM_SUSPEND_H
 #define __ASM_SUSPEND_H
 
-#define NR_CTX_REGS 11
+#define NR_CTX_REGS 13
 
 /*
  * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on

--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -173,9 +173,6 @@ ENTRY(cpu_resume)
 	/* load physical address of identity map page table in x1 */
 	adrp	x1, idmap_pg_dir
 	mov	sp, x2
-	/* save thread_info */
-	and	x2, x2, #~(THREAD_SIZE - 1)
-	msr	sp_el0, x2
 	/*
 	 * cpu_do_resume expects x0 to contain context physical address
 	 * pointer and x1 to contain physical address of 1:1 page tables

--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -95,12 +95,6 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 		 */
 		cpu_uninstall_idmap();
 
-		/*
-		 * Restore per-cpu offset before any kernel
-		 * subsystem relying on it has a chance to run.
-		 */
-		set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-
 		/*
 		 * PSTATE was not saved over suspend/resume, re-enable any
 		 * detected features that might not have been set correctly.

--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -71,12 +71,15 @@ ENTRY(cpu_do_suspend)
 	mrs	x10, mdscr_el1
 	mrs	x11, oslsr_el1
 	mrs	x12, sctlr_el1
+	mrs	x13, tpidr_el1
+	mrs	x14, sp_el0
 	stp	x2, x3, [x0]
 	stp	x4, x5, [x0, #16]
 	stp	x6, x7, [x0, #32]
 	stp	x8, x9, [x0, #48]
 	stp	x10, x11, [x0, #64]
-	str	x12, [x0, #80]
+	stp	x12, x13, [x0, #80]
+	str	x14, [x0, #96]
 	ret
 ENDPROC(cpu_do_suspend)
 
@@ -99,7 +102,8 @@ ENTRY(cpu_do_resume)
 	ldp	x6, x7, [x0, #32]
 	ldp	x8, x9, [x0, #48]
 	ldp	x10, x11, [x0, #64]
-	ldr	x12, [x0, #80]
+	ldp	x12, x13, [x0, #80]
+	ldr	x14, [x0, #96]
 	msr	tpidr_el0, x2
 	msr	tpidrro_el0, x3
 	msr	contextidr_el1, x4
@@ -111,6 +115,8 @@ ENTRY(cpu_do_resume)
 	msr	tcr_el1, x8
 	msr	vbar_el1, x9
 	msr	mdscr_el1, x10
+	msr	tpidr_el1, x13
+	msr	sp_el0, x14
 	/*
 	 * Restore oslsr_el1 by writing oslar_el1
 	 */