Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (77 commits)
  x86: UV startup of slave cpus
  x86: integrate pci-dma.c
  x86: don't do dma if mask is NULL.
  x86: return conditional to mmu
  x86: remove kludge from x86_64
  x86: unify gfp masks
  x86: retry allocation if failed
  x86: don't try to allocate from DMA zone at first
  x86: use a fallback dev for i386
  x86: use numa allocation function in i386
  x86: remove virt_to_bus in pci-dma_64.c
  x86: adjust dma_free_coherent for i386
  x86: move bad_dma_address
  x86: isolate coherent mapping functions
  x86: move dma_coherent functions to pci-dma.c
  x86: merge iommu initialization parameters
  x86: merge dma_supported
  x86: move pci fixup to pci-dma.c
  x86: move x86_64-specific to common code.
  x86: move initialization functions to pci-dma.c
  ...
commit 5f033bb9bc
107 changed files with 1577 additions and 1117 deletions
Documentation/feature-removal-schedule.txt
@@ -282,6 +282,13 @@ Why: Not used in-tree. The current out-of-tree users used it to
 	out-of-tree driver.
 Who:	Thomas Gleixner <tglx@linutronix.de>
 
+----------------------------
+
+What:	usedac i386 kernel parameter
+When:	2.6.27
+Why:	replaced by allowdac and no dac combination
+Who:	Glauber Costa <gcosta@redhat.com>
+
 ---------------------------
 
 What:	/sys/o2cb symlink
Documentation/kernel-parameters.txt
@@ -1280,8 +1280,16 @@ and is between 256 and 4096 characters. It is defined in the file
 	noexec		[IA-64]
 
 	noexec		[X86-32,X86-64]
+			On X86-32 available only on PAE configured kernels.
 			noexec=on: enable non-executable mappings (default)
-			noexec=off: disable nn-executable mappings
+			noexec=off: disable non-executable mappings
 
+	noexec32	[X86-64]
+			This affects only 32-bit executables.
+			noexec32=on: enable non-executable mappings (default)
+				read doesn't imply executable mappings
+			noexec32=off: disable non-executable mappings
+				read implies executable mappings
+
 	nofxsr		[BUGS=X86-32] Disables x86 floating point extended
 			register save and restore. The kernel will only save
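For context (not part of the diff): these options are passed on the kernel command line at boot. A minimal sketch of a bootloader entry, with illustrative kernel image and root device:

    kernel /boot/vmlinuz root=/dev/sda1 noexec=on noexec32=off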
Documentation/prctl/disable-tsc-ctxt-sw-stress-test.c (new file, 96 lines)
@@ -0,0 +1,96 @@
+/*
+ * Tests for prctl(PR_GET_TSC, ...) / prctl(PR_SET_TSC, ...)
+ *
+ * Tests if the control register is updated correctly
+ * at context switches
+ *
+ * Warning: this test will cause a very high load for a few seconds
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <inttypes.h>
+#include <wait.h>
+
+
+#include <sys/prctl.h>
+#include <linux/prctl.h>
+
+/* Get/set the process' ability to use the timestamp counter instruction */
+#ifndef PR_GET_TSC
+#define PR_GET_TSC 25
+#define PR_SET_TSC 26
+# define PR_TSC_ENABLE    1   /* allow the use of the timestamp counter */
+# define PR_TSC_SIGSEGV   2   /* throw a SIGSEGV instead of reading the TSC */
+#endif
+
+uint64_t rdtsc() {
+    uint32_t lo, hi;
+    /* We cannot use "=A", since this would use %rax on x86_64 */
+    __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
+    return (uint64_t)hi << 32 | lo;
+}
+
+void sigsegv_expect(int sig)
+{
+    /* */
+}
+
+void segvtask(void)
+{
+    if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV) < 0)
+    {
+        perror("prctl");
+        exit(0);
+    }
+    signal(SIGSEGV, sigsegv_expect);
+    alarm(10);
+    rdtsc();
+    fprintf(stderr, "FATAL ERROR, rdtsc() succeeded while disabled\n");
+    exit(0);
+}
+
+
+void sigsegv_fail(int sig)
+{
+    fprintf(stderr, "FATAL ERROR, rdtsc() failed while enabled\n");
+    exit(0);
+}
+
+void rdtsctask(void)
+{
+    if (prctl(PR_SET_TSC, PR_TSC_ENABLE) < 0)
+    {
+        perror("prctl");
+        exit(0);
+    }
+    signal(SIGSEGV, sigsegv_fail);
+    alarm(10);
+    for(;;) rdtsc();
+}
+
+
+int main(int argc, char **argv)
+{
+    int n_tasks = 100, i;
+
+    fprintf(stderr, "[No further output means we're allright]\n");
+
+    for (i=0; i<n_tasks; i++)
+        if (fork() == 0)
+        {
+            if (i & 1)
+                segvtask();
+            else
+                rdtsctask();
+        }
+
+    for (i=0; i<n_tasks; i++)
+        wait(NULL);
+
+    exit(0);
+}
Documentation/prctl/disable-tsc-on-off-stress-test.c (new file, 95 lines)
@@ -0,0 +1,95 @@
+/*
+ * Tests for prctl(PR_GET_TSC, ...) / prctl(PR_SET_TSC, ...)
+ *
+ * Tests if the control register is updated correctly
+ * when set with prctl()
+ *
+ * Warning: this test will cause a very high load for a few seconds
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <inttypes.h>
+#include <wait.h>
+
+
+#include <sys/prctl.h>
+#include <linux/prctl.h>
+
+/* Get/set the process' ability to use the timestamp counter instruction */
+#ifndef PR_GET_TSC
+#define PR_GET_TSC 25
+#define PR_SET_TSC 26
+# define PR_TSC_ENABLE    1   /* allow the use of the timestamp counter */
+# define PR_TSC_SIGSEGV   2   /* throw a SIGSEGV instead of reading the TSC */
+#endif
+
+/* snippet from wikipedia :-) */
+
+uint64_t rdtsc() {
+    uint32_t lo, hi;
+    /* We cannot use "=A", since this would use %rax on x86_64 */
+    __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
+    return (uint64_t)hi << 32 | lo;
+}
+
+int should_segv = 0;
+
+void sigsegv_cb(int sig)
+{
+    if (!should_segv)
+    {
+        fprintf(stderr, "FATAL ERROR, rdtsc() failed while enabled\n");
+        exit(0);
+    }
+    if (prctl(PR_SET_TSC, PR_TSC_ENABLE) < 0)
+    {
+        perror("prctl");
+        exit(0);
+    }
+    should_segv = 0;
+
+    rdtsc();
+}
+
+void task(void)
+{
+    signal(SIGSEGV, sigsegv_cb);
+    alarm(10);
+    for(;;)
+    {
+        rdtsc();
+        if (should_segv)
+        {
+            fprintf(stderr, "FATAL ERROR, rdtsc() succeeded while disabled\n");
+            exit(0);
+        }
+        if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV) < 0)
+        {
+            perror("prctl");
+            exit(0);
+        }
+        should_segv = 1;
+    }
+}
+
+
+int main(int argc, char **argv)
+{
+    int n_tasks = 100, i;
+
+    fprintf(stderr, "[No further output means we're allright]\n");
+
+    for (i=0; i<n_tasks; i++)
+        if (fork() == 0)
+            task();
+
+    for (i=0; i<n_tasks; i++)
+        wait(NULL);
+
+    exit(0);
+}
Documentation/prctl/disable-tsc-test.c (new file, 94 lines)
@@ -0,0 +1,94 @@
+/*
+ * Tests for prctl(PR_GET_TSC, ...) / prctl(PR_SET_TSC, ...)
+ *
+ * Basic test to test behaviour of PR_GET_TSC and PR_SET_TSC
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <inttypes.h>
+
+
+#include <sys/prctl.h>
+#include <linux/prctl.h>
+
+/* Get/set the process' ability to use the timestamp counter instruction */
+#ifndef PR_GET_TSC
+#define PR_GET_TSC 25
+#define PR_SET_TSC 26
+# define PR_TSC_ENABLE    1   /* allow the use of the timestamp counter */
+# define PR_TSC_SIGSEGV   2   /* throw a SIGSEGV instead of reading the TSC */
+#endif
+
+const char *tsc_names[] =
+{
+    [0] = "[not set]",
+    [PR_TSC_ENABLE] = "PR_TSC_ENABLE",
+    [PR_TSC_SIGSEGV] = "PR_TSC_SIGSEGV",
+};
+
+uint64_t rdtsc() {
+    uint32_t lo, hi;
+    /* We cannot use "=A", since this would use %rax on x86_64 */
+    __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
+    return (uint64_t)hi << 32 | lo;
+}
+
+void sigsegv_cb(int sig)
+{
+    int tsc_val = 0;
+
+    printf("[ SIG_SEGV ]\n");
+    printf("prctl(PR_GET_TSC, &tsc_val); ");
+    fflush(stdout);
+
+    if ( prctl(PR_GET_TSC, &tsc_val) == -1)
+        perror("prctl");
+
+    printf("tsc_val == %s\n", tsc_names[tsc_val]);
+    printf("prctl(PR_SET_TSC, PR_TSC_ENABLE)\n");
+    fflush(stdout);
+    if ( prctl(PR_SET_TSC, PR_TSC_ENABLE) == -1)
+        perror("prctl");
+
+    printf("rdtsc() == ");
+}
+
+int main(int argc, char **argv)
+{
+    int tsc_val = 0;
+
+    signal(SIGSEGV, sigsegv_cb);
+
+    printf("rdtsc() == %llu\n", (unsigned long long)rdtsc());
+    printf("prctl(PR_GET_TSC, &tsc_val); ");
+    fflush(stdout);
+
+    if ( prctl(PR_GET_TSC, &tsc_val) == -1)
+        perror("prctl");
+
+    printf("tsc_val == %s\n", tsc_names[tsc_val]);
+    printf("rdtsc() == %llu\n", (unsigned long long)rdtsc());
+    printf("prctl(PR_SET_TSC, PR_TSC_ENABLE)\n");
+    fflush(stdout);
+
+    if ( prctl(PR_SET_TSC, PR_TSC_ENABLE) == -1)
+        perror("prctl");
+
+    printf("rdtsc() == %llu\n", (unsigned long long)rdtsc());
+    printf("prctl(PR_SET_TSC, PR_TSC_SIGSEGV)\n");
+    fflush(stdout);
+
+    if ( prctl(PR_SET_TSC, PR_TSC_SIGSEGV) == -1)
+        perror("prctl");
+
+    printf("rdtsc() == ");
+    fflush(stdout);
+    printf("%llu\n", (unsigned long long)rdtsc());
+    fflush(stdout);
+
+    exit(EXIT_SUCCESS);
+}
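For context (not part of the diff): the three test programs above are self-contained userspace C files. A minimal build-and-run sketch, assuming a standard gcc toolchain (the #ifndef PR_GET_TSC fallbacks above cover headers that predate these prctl constants):

    gcc -o disable-tsc-test disable-tsc-test.c
    ./disable-tsc-test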
arch/x86/Kconfig
@@ -903,6 +903,15 @@ config X86_64_ACPI_NUMA
 	help
 	  Enable ACPI SRAT based node topology detection.
 
+# Some NUMA nodes have memory ranges that span
+# other nodes.  Even though a pfn is valid and
+# between a node's start and end pfns, it may not
+# reside on that node.  See memmap_init_zone()
+# for details.
+config NODES_SPAN_OTHER_NODES
+	def_bool y
+	depends on X86_64_ACPI_NUMA
+
 config NUMA_EMU
 	bool "NUMA emulation"
 	depends on X86_64 && NUMA
arch/x86/boot/a20.c
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/a20.c
- *
  * Enable A20 gate (return -1 on failure)
  */
 

arch/x86/boot/apm.c
@@ -12,8 +12,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/apm.c
- *
  * Get APM BIOS information
  */
 

arch/x86/boot/bitops.h
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/bitops.h
- *
  * Very simple bitops for the boot code.
  */
 

arch/x86/boot/boot.h
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/boot.h
- *
  * Header file for the real-mode kernel code
  */
 

arch/x86/boot/cmdline.c
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/cmdline.c
- *
  * Simple command-line parser for early boot.
  */
 
arch/x86/boot/compressed/head_32.S
@@ -130,7 +130,7 @@ relocated:
 /*
  * Setup the stack for the decompressor
  */
-	leal stack_end(%ebx), %esp
+	leal boot_stack_end(%ebx), %esp
 
 /*
  * Do the decompression, and jump to the new kernel..
@@ -142,8 +142,8 @@ relocated:
 	pushl %eax	# input_len
 	leal input_data(%ebx), %eax
 	pushl %eax	# input_data
-	leal _end(%ebx), %eax
-	pushl %eax	# end of the image as third argument
+	leal boot_heap(%ebx), %eax
+	pushl %eax	# heap area as third argument
 	pushl %esi	# real mode pointer as second arg
 	call decompress_kernel
 	addl $20, %esp
@@ -181,7 +181,10 @@ relocated:
 	jmp *%ebp
 
 .bss
+/* Stack and heap for uncompression */
 	.balign 4
-stack:
-	.fill 4096, 1, 0
-stack_end:
+boot_heap:
+	.fill BOOT_HEAP_SIZE, 1, 0
+boot_stack:
+	.fill BOOT_STACK_SIZE, 1, 0
+boot_stack_end:
arch/x86/boot/compressed/head_64.S
@@ -28,6 +28,7 @@
 #include <asm/segment.h>
 #include <asm/pgtable.h>
 #include <asm/page.h>
+#include <asm/boot.h>
 #include <asm/msr.h>
 #include <asm/asm-offsets.h>
 
@@ -62,7 +63,7 @@ startup_32:
 	subl $1b, %ebp
 
 /* setup a stack and make sure cpu supports long mode. */
-	movl $user_stack_end, %eax
+	movl $boot_stack_end, %eax
 	addl %ebp, %eax
 	movl %eax, %esp
 
@@ -243,9 +244,9 @@ ENTRY(startup_64)
 /* Copy the compressed kernel to the end of our buffer
  * where decompression in place becomes safe.
  */
-	leaq _end(%rip), %r8
-	leaq _end(%rbx), %r9
-	movq $_end /* - $startup_32 */, %rcx
+	leaq _end_before_pgt(%rip), %r8
+	leaq _end_before_pgt(%rbx), %r9
+	movq $_end_before_pgt /* - $startup_32 */, %rcx
 1:	subq $8, %r8
 	subq $8, %r9
 	movq 0(%r8), %rax
@@ -267,14 +268,14 @@ relocated:
 	 */
 	xorq %rax, %rax
 	leaq _edata(%rbx), %rdi
-	leaq _end(%rbx), %rcx
+	leaq _end_before_pgt(%rbx), %rcx
 	subq %rdi, %rcx
 	cld
 	rep
 	stosb
 
 	/* Setup the stack */
-	leaq user_stack_end(%rip), %rsp
+	leaq boot_stack_end(%rip), %rsp
 
 	/* zero EFLAGS after setting rsp */
 	pushq $0
@@ -285,7 +286,7 @@ relocated:
 	 */
 	pushq %rsi	# Save the real mode argument
 	movq %rsi, %rdi	# real mode address
-	leaq _heap(%rip), %rsi	# _heap
+	leaq boot_heap(%rip), %rsi	# malloc area for uncompression
 	leaq input_data(%rip), %rdx	# input_data
 	movl input_len(%rip), %eax
 	movq %rax, %rcx	# input_len
@@ -310,9 +311,12 @@ gdt:
 	.quad 0x0080890000000000	/* TS descriptor */
 	.quad 0x0000000000000000	/* TS continued */
 gdt_end:
-	.bss
-/* Stack for uncompression */
-	.balign 4
-user_stack:
-	.fill 4096,4,0
-user_stack_end:
+
+	.bss
+/* Stack and heap for uncompression */
+	.balign 4
+boot_heap:
+	.fill BOOT_HEAP_SIZE, 1, 0
+boot_stack:
+	.fill BOOT_STACK_SIZE, 1, 0
+boot_stack_end:
arch/x86/boot/compressed/misc.c
@@ -217,12 +217,6 @@ static void putstr(const char *);
 static memptr free_mem_ptr;
 static memptr free_mem_end_ptr;
 
-#ifdef CONFIG_X86_64
-#define HEAP_SIZE	0x7000
-#else
-#define HEAP_SIZE	0x4000
-#endif
-
 static char *vidmem;
 static int vidport;
 static int lines, cols;
@@ -449,7 +443,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
 
 	window = output;	/* Output buffer (Normally at 1M) */
 	free_mem_ptr = heap;	/* Heap */
-	free_mem_end_ptr = heap + HEAP_SIZE;
+	free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
 	inbuf = input_data;	/* Input buffer */
 	insize = input_len;
 	inptr = 0;
arch/x86/boot/compressed/vmlinux_64.lds
@@ -39,10 +39,10 @@ SECTIONS
 		*(.bss.*)
 		*(COMMON)
 		. = ALIGN(8);
-		_end = . ;
+		_end_before_pgt = . ;
 		. = ALIGN(4096);
 		pgtable = . ;
 		. = . + 4096 * 6;
-		_heap = .;
+		_ebss = .;
 	}
 }
arch/x86/boot/copy.S
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/copy.S
- *
  * Memory copy routines
  */
 

arch/x86/boot/cpucheck.c
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/cpucheck.c
- *
  * Check for obligatory CPU features and abort if the features are not
  * present. This code should be compilable as 16-, 32- or 64-bit
  * code, so be very careful with types and inline assembly.

arch/x86/boot/edd.c
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/edd.c
- *
  * Get EDD BIOS disk information
  */
 

arch/x86/boot/install.sh
@@ -1,7 +1,5 @@
 #!/bin/sh
 #
-# arch/i386/boot/install.sh
-#
 # This file is subject to the terms and conditions of the GNU General Public
 # License.  See the file "COPYING" in the main directory of this archive
 # for more details.

arch/x86/boot/main.c
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/main.c
- *
  * Main module for the real-mode kernel code
  */
 

arch/x86/boot/mca.c
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/mca.c
- *
  * Get the MCA system description table
  */
 

arch/x86/boot/memory.c
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/memory.c
- *
  * Memory detection code
  */
 

arch/x86/boot/pm.c
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/pm.c
- *
  * Prepare the machine for transition to protected mode.
  */
 

arch/x86/boot/pmjump.S
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/pmjump.S
- *
  * The actual transition into protected mode
  */
 

arch/x86/boot/printf.c
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/printf.c
- *
  * Oh, it's a waste of space, but oh-so-yummy for debugging. This
  * version of printf() does not include 64-bit support. "Live with
  * it."

arch/x86/boot/string.c
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/string.c
- *
  * Very basic string functions
  */
 

arch/x86/boot/tty.c
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/tty.c
- *
  * Very simple screen I/O
  * XXX: Probably should add very simple serial I/O?
  */

arch/x86/boot/version.c
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/version.c
- *
  * Kernel version string
  */
 

arch/x86/boot/video-bios.c
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/video-bios.c
- *
  * Standard video BIOS modes
  *
  * We have two options for this; silent and scanned.

arch/x86/boot/video-vesa.c
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/video-vesa.c
- *
  * VESA text modes
  */
 

arch/x86/boot/video-vga.c
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/video-vga.c
- *
  * Common all-VGA modes
  */
 

arch/x86/boot/video.c
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/video.c
- *
  * Select video mode
  */
 

arch/x86/boot/video.h
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/video.h
- *
  * Header file for the real-mode video probing code
  */
 

arch/x86/boot/voyager.c
@@ -9,8 +9,6 @@
  * ----------------------------------------------------------------------- */
 
 /*
- * arch/i386/boot/voyager.c
- *
  * Get the Voyager config information
  */
 
arch/x86/kernel/Makefile
@@ -22,13 +22,14 @@ obj-y += setup_$(BITS).o i8259_$(BITS).o setup.o
 obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o
 obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
 obj-$(CONFIG_X86_64)	+= syscall_64.o vsyscall_64.o setup64.o
-obj-y			+= pci-dma_$(BITS).o bootflag.o e820_$(BITS).o
-obj-y			+= quirks.o i8237.o topology.o kdebugfs.o
-obj-y			+= alternative.o i8253.o
-obj-$(CONFIG_X86_64)	+= pci-nommu_64.o bugs_64.o
+obj-y			+= bootflag.o e820_$(BITS).o
+obj-y			+= pci-dma.o quirks.o i8237.o topology.o kdebugfs.o
+obj-y			+= alternative.o i8253.o pci-nommu.o
+obj-$(CONFIG_X86_64)	+= bugs_64.o
 obj-y			+= tsc_$(BITS).o io_delay.o rtc.o
 
 obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
+obj-y				+= process.o
 obj-y				+= i387.o
 obj-y				+= ptrace.o
 obj-y				+= ds.o
arch/x86/kernel/acpi/cstate.c
@@ -1,6 +1,4 @@
 /*
- * arch/i386/kernel/acpi/cstate.c
- *
  * Copyright (C) 2005 Intel Corporation
  * 	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
  * 	- Added _PDC for SMP C-states on Intel CPUs

arch/x86/kernel/acpi/processor.c
@@ -1,6 +1,4 @@
 /*
- * arch/i386/kernel/acpi/processor.c
- *
  * Copyright (C) 2005 Intel Corporation
  * 	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
  * 	- Added _PDC for platforms with Intel CPUs
arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -289,8 +289,8 @@ static int __init cpufreq_p4_init(void)
 	if (c->x86_vendor != X86_VENDOR_INTEL)
 		return -ENODEV;
 
-	if (!test_bit(X86_FEATURE_ACPI, c->x86_capability) ||
-	    !test_bit(X86_FEATURE_ACC, c->x86_capability))
+	if (!test_cpu_cap(c, X86_FEATURE_ACPI) ||
+	    !test_cpu_cap(c, X86_FEATURE_ACC))
 		return -ENODEV;
 
 	ret = cpufreq_register_driver(&p4clockmod_driver);
arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -1,5 +1,4 @@
 /*
- * linux/arch/i386/kernel/cpu/mcheck/therm_throt.c
  *
  * Thermal throttle event support code (such as syslog messaging and rate
  * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
arch/x86/kernel/e820_32.c
@@ -475,7 +475,7 @@ int __init copy_e820_map(struct e820entry *biosmap, int nr_map)
 /*
  * Find the highest page frame number we have available
  */
-void __init find_max_pfn(void)
+void __init propagate_e820_map(void)
 {
 	int i;
 
@@ -704,7 +704,7 @@ static int __init parse_memmap(char *arg)
 			 * size before original memory map is
 			 * reset.
 			 */
-			find_max_pfn();
+			propagate_e820_map();
 			saved_max_pfn = max_pfn;
 #endif
 			e820.nr_map = 0;
arch/x86/kernel/e820_64.c
@@ -96,7 +96,7 @@ void __init early_res_to_bootmem(void)
 }
 
 /* Check for already reserved areas */
-static inline int
+static inline int __init
 bad_addr(unsigned long *addrp, unsigned long size, unsigned long align)
 {
 	int i;
@@ -116,7 +116,7 @@ again:
 }
 
 /* Check for already reserved areas */
-static inline int
+static inline int __init
 bad_addr_size(unsigned long *addrp, unsigned long *sizep, unsigned long align)
 {
 	int i;
arch/x86/kernel/efi.c
@@ -383,6 +383,7 @@ static void __init runtime_code_page_mkexec(void)
 {
 	efi_memory_desc_t *md;
 	void *p;
+	u64 addr, npages;
 
 	/* Make EFI runtime service code area executable */
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
@@ -391,7 +392,10 @@ static void __init runtime_code_page_mkexec(void)
 		if (md->type != EFI_RUNTIME_SERVICES_CODE)
 			continue;
 
-		set_memory_x(md->virt_addr, md->num_pages);
+		addr = md->virt_addr;
+		npages = md->num_pages;
+		memrange_efi_to_native(&addr, &npages);
+		set_memory_x(addr, npages);
 	}
 }
 
@@ -408,7 +412,7 @@ void __init efi_enter_virtual_mode(void)
 	efi_memory_desc_t *md;
 	efi_status_t status;
 	unsigned long size;
-	u64 end, systab;
+	u64 end, systab, addr, npages;
 	void *p, *va;
 
 	efi.systab = NULL;
@@ -420,7 +424,7 @@ void __init efi_enter_virtual_mode(void)
 		size = md->num_pages << EFI_PAGE_SHIFT;
 		end = md->phys_addr + size;
 
-		if ((end >> PAGE_SHIFT) <= max_pfn_mapped)
+		if (PFN_UP(end) <= max_pfn_mapped)
 			va = __va(md->phys_addr);
 		else
 			va = efi_ioremap(md->phys_addr, size);
@@ -433,8 +437,12 @@ void __init efi_enter_virtual_mode(void)
 			continue;
 		}
 
-		if (!(md->attribute & EFI_MEMORY_WB))
-			set_memory_uc(md->virt_addr, md->num_pages);
+		if (!(md->attribute & EFI_MEMORY_WB)) {
+			addr = md->virt_addr;
+			npages = md->num_pages;
+			memrange_efi_to_native(&addr, &npages);
+			set_memory_uc(addr, npages);
+		}
 
 		systab = (u64) (unsigned long) efi_phys.systab;
 		if (md->phys_addr <= systab && systab < end) {
arch/x86/kernel/efi_64.c
@@ -105,14 +105,14 @@ void __init efi_reserve_bootmem(void)
 
 void __iomem * __init efi_ioremap(unsigned long phys_addr, unsigned long size)
 {
-	static unsigned pages_mapped;
+	static unsigned pages_mapped __initdata;
 	unsigned i, pages;
+	unsigned long offset;
 
-	/* phys_addr and size must be page aligned */
-	if ((phys_addr & ~PAGE_MASK) || (size & ~PAGE_MASK))
-		return NULL;
+	pages = PFN_UP(phys_addr + size) - PFN_DOWN(phys_addr);
+	offset = phys_addr & ~PAGE_MASK;
+	phys_addr &= PAGE_MASK;
 
-	pages = size >> PAGE_SHIFT;
 	if (pages_mapped + pages > MAX_EFI_IO_PAGES)
 		return NULL;
 
@@ -124,5 +124,5 @@ void __iomem * __init efi_ioremap(unsigned long phys_addr, unsigned long size)
 	}
 
 	return (void __iomem *)__fix_to_virt(FIX_EFI_IO_MAP_FIRST_PAGE - \
-					     (pages_mapped - pages));
+					     (pages_mapped - pages)) + offset;
 }
arch/x86/kernel/entry_32.S
@@ -1,5 +1,4 @@
 /*
- *  linux/arch/i386/entry.S
  *
  *  Copyright (C) 1991, 1992  Linus Torvalds
  */
arch/x86/kernel/genx2apic_uv_x.c
@@ -61,26 +61,31 @@ int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
 	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
 	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
 	    (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
-	    (6 << UVH_IPI_INT_DELIVERY_MODE_SHFT);
+	    APIC_DM_INIT;
+	uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
+	mdelay(10);
+
+	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
+	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
+	    (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
+	    APIC_DM_STARTUP;
 	uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
 	return 0;
 }
 
 static void uv_send_IPI_one(int cpu, int vector)
 {
-	unsigned long val, apicid;
+	unsigned long val, apicid, lapicid;
 	int nasid;
 
 	apicid = per_cpu(x86_cpu_to_apicid, cpu); /* ZZZ - cache node-local ? */
+	lapicid = apicid & 0x3f;	/* ZZZ macro needed */
 	nasid = uv_apicid_to_nasid(apicid);
 	val =
-	    (1UL << UVH_IPI_INT_SEND_SHFT) | (apicid <<
+	    (1UL << UVH_IPI_INT_SEND_SHFT) | (lapicid <<
 					      UVH_IPI_INT_APIC_ID_SHFT) |
 	    (vector << UVH_IPI_INT_VECTOR_SHFT);
 	uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
-	printk(KERN_DEBUG
-	       "UV: IPI to cpu %d, apicid 0x%lx, vec %d, nasid%d, val 0x%lx\n",
-	       cpu, apicid, vector, nasid, val);
 }
 
 static void uv_send_IPI_mask(cpumask_t mask, int vector)
arch/x86/kernel/head64.c
@@ -146,6 +146,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
 
 	reserve_early(__pa_symbol(&_text), __pa_symbol(&_end), "TEXT DATA BSS");
 
+#ifdef CONFIG_BLK_DEV_INITRD
 	/* Reserve INITRD */
 	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
 		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
@@ -153,6 +154,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
 		unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
 		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
 	}
+#endif
 
 	reserve_ebda_region();
 
arch/x86/kernel/head_32.S
@@ -1,5 +1,4 @@
 /*
- *  linux/arch/i386/kernel/head.S -- the 32-bit startup code.
  *
  *  Copyright (C) 1991, 1992  Linus Torvalds
  *
arch/x86/kernel/i387.c
@@ -35,17 +35,18 @@
 #endif
 
 static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
+unsigned int xstate_size;
+static struct i387_fxsave_struct fx_scratch __cpuinitdata;
 
-void mxcsr_feature_mask_init(void)
+void __cpuinit mxcsr_feature_mask_init(void)
 {
 	unsigned long mask = 0;
 
 	clts();
 	if (cpu_has_fxsr) {
-		memset(&current->thread.i387.fxsave, 0,
-		       sizeof(struct i387_fxsave_struct));
-		asm volatile("fxsave %0" : : "m" (current->thread.i387.fxsave));
-		mask = current->thread.i387.fxsave.mxcsr_mask;
+		memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
+		asm volatile("fxsave %0" : : "m" (fx_scratch));
+		mask = fx_scratch.mxcsr_mask;
 		if (mask == 0)
 			mask = 0x0000ffbf;
 	}
@@ -53,6 +54,16 @@ void mxcsr_feature_mask_init(void)
 	stts();
 }
 
+void __init init_thread_xstate(void)
+{
+	if (cpu_has_fxsr)
+		xstate_size = sizeof(struct i387_fxsave_struct);
+#ifdef CONFIG_X86_32
+	else
+		xstate_size = sizeof(struct i387_fsave_struct);
+#endif
+}
+
 #ifdef CONFIG_X86_64
 /*
  * Called at bootup to set up the initial FPU state that is later cloned
@@ -61,10 +72,6 @@ void mxcsr_feature_mask_init(void)
 void __cpuinit fpu_init(void)
 {
 	unsigned long oldcr0 = read_cr0();
-	extern void __bad_fxsave_alignment(void);
-
-	if (offsetof(struct task_struct, thread.i387.fxsave) & 15)
-		__bad_fxsave_alignment();
 
 	set_in_cr4(X86_CR4_OSFXSR);
 	set_in_cr4(X86_CR4_OSXMMEXCPT);
@@ -84,32 +91,44 @@ void __cpuinit fpu_init(void)
  * value at reset if we support XMM instructions and then
  * remeber the current task has used the FPU.
  */
-void init_fpu(struct task_struct *tsk)
+int init_fpu(struct task_struct *tsk)
 {
 	if (tsk_used_math(tsk)) {
 		if (tsk == current)
 			unlazy_fpu(tsk);
-		return;
+		return 0;
+	}
+
+	/*
+	 * Memory allocation at the first usage of the FPU and other state.
+	 */
+	if (!tsk->thread.xstate) {
+		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+						      GFP_KERNEL);
+		if (!tsk->thread.xstate)
+			return -ENOMEM;
 	}
 
 	if (cpu_has_fxsr) {
-		memset(&tsk->thread.i387.fxsave, 0,
-		       sizeof(struct i387_fxsave_struct));
-		tsk->thread.i387.fxsave.cwd = 0x37f;
+		struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+
+		memset(fx, 0, xstate_size);
+		fx->cwd = 0x37f;
 		if (cpu_has_xmm)
-			tsk->thread.i387.fxsave.mxcsr = MXCSR_DEFAULT;
+			fx->mxcsr = MXCSR_DEFAULT;
 	} else {
-		memset(&tsk->thread.i387.fsave, 0,
-		       sizeof(struct i387_fsave_struct));
-		tsk->thread.i387.fsave.cwd = 0xffff037fu;
-		tsk->thread.i387.fsave.swd = 0xffff0000u;
-		tsk->thread.i387.fsave.twd = 0xffffffffu;
-		tsk->thread.i387.fsave.fos = 0xffff0000u;
+		struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
+		memset(fp, 0, xstate_size);
+		fp->cwd = 0xffff037fu;
+		fp->swd = 0xffff0000u;
+		fp->twd = 0xffffffffu;
+		fp->fos = 0xffff0000u;
 	}
 	/*
 	 * Only the device not available exception or ptrace can call init_fpu.
 	 */
 	set_stopped_child_used_math(tsk);
+	return 0;
 }
 
 int fpregs_active(struct task_struct *target, const struct user_regset *regset)
@@ -126,13 +145,17 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
 		unsigned int pos, unsigned int count,
 		void *kbuf, void __user *ubuf)
 {
+	int ret;
+
 	if (!cpu_has_fxsr)
 		return -ENODEV;
 
-	init_fpu(target);
+	ret = init_fpu(target);
+	if (ret)
+		return ret;
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				   &target->thread.i387.fxsave, 0, -1);
+				   &target->thread.xstate->fxsave, 0, -1);
 }
 
 int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
@@ -144,16 +167,19 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
 	if (!cpu_has_fxsr)
 		return -ENODEV;
 
-	init_fpu(target);
+	ret = init_fpu(target);
+	if (ret)
+		return ret;
 
 	set_stopped_child_used_math(target);
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				 &target->thread.i387.fxsave, 0, -1);
+				 &target->thread.xstate->fxsave, 0, -1);
 
 	/*
 	 * mxcsr reserved bits must be masked to zero for security reasons.
 	 */
-	target->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
+	target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
 
 	return ret;
 }
@@ -233,7 +259,7 @@ static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
 static void
 convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
 {
-	struct i387_fxsave_struct *fxsave = &tsk->thread.i387.fxsave;
+	struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave;
 	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
 	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
 	int i;
@@ -273,7 +299,7 @@ static void convert_to_fxsr(struct task_struct *tsk,
 			    const struct user_i387_ia32_struct *env)
 
 {
-	struct i387_fxsave_struct *fxsave = &tsk->thread.i387.fxsave;
+	struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave;
 	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
 	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
 	int i;
@@ -302,15 +328,19 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
 	       void *kbuf, void __user *ubuf)
 {
 	struct user_i387_ia32_struct env;
+	int ret;
 
 	if (!HAVE_HWFP)
 		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
 
-	init_fpu(target);
+	ret = init_fpu(target);
+	if (ret)
+		return ret;
 
 	if (!cpu_has_fxsr) {
 		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-					   &target->thread.i387.fsave, 0, -1);
+					   &target->thread.xstate->fsave, 0,
+					   -1);
 	}
 
 	if (kbuf && pos == 0 && count == sizeof(env)) {
@@ -333,12 +363,15 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 	if (!HAVE_HWFP)
 		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
 
-	init_fpu(target);
+	ret = init_fpu(target);
+	if (ret)
+		return ret;
 
 	set_stopped_child_used_math(target);
 
 	if (!cpu_has_fxsr) {
 		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-					  &target->thread.i387.fsave, 0, -1);
+					  &target->thread.xstate->fsave, 0, -1);
 	}
 
 	if (pos > 0 || count < sizeof(env))
@@ -358,11 +391,11 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
 {
 	struct task_struct *tsk = current;
+	struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
 
 	unlazy_fpu(tsk);
-	tsk->thread.i387.fsave.status = tsk->thread.i387.fsave.swd;
-	if (__copy_to_user(buf, &tsk->thread.i387.fsave,
-			   sizeof(struct i387_fsave_struct)))
+	fp->status = fp->swd;
+	if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct)))
 		return -1;
 	return 1;
 }
@@ -370,6 +403,7 @@ static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
 static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
 {
 	struct task_struct *tsk = current;
+	struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
 	struct user_i387_ia32_struct env;
 	int err = 0;
 
@@ -379,12 +413,12 @@ static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
 	if (__copy_to_user(buf, &env, sizeof(env)))
 		return -1;
 
-	err |= __put_user(tsk->thread.i387.fxsave.swd, &buf->status);
+	err |= __put_user(fx->swd, &buf->status);
 	err |= __put_user(X86_FXSR_MAGIC, &buf->magic);
 	if (err)
 		return -1;
 
-	if (__copy_to_user(&buf->_fxsr_env[0], &tsk->thread.i387.fxsave,
+	if (__copy_to_user(&buf->_fxsr_env[0], fx,
 			   sizeof(struct i387_fxsave_struct)))
 		return -1;
 	return 1;
@@ -417,7 +451,7 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
 	struct task_struct *tsk = current;
 
 	clear_fpu(tsk);
-	return __copy_from_user(&tsk->thread.i387.fsave, buf,
+	return __copy_from_user(&tsk->thread.xstate->fsave, buf,
 				sizeof(struct i387_fsave_struct));
 }
 
@@ -428,10 +462,10 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
 	int err;
 
 	clear_fpu(tsk);
-	err = __copy_from_user(&tsk->thread.i387.fxsave, &buf->_fxsr_env[0],
+	err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0],
 			       sizeof(struct i387_fxsave_struct));
 	/* mxcsr reserved bits must be masked to zero for security reasons */
-	tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
+	tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
 	if (err || __copy_from_user(&env, buf, sizeof(env)))
 		return 1;
 	convert_to_fxsr(tsk, &env);
@@ -46,11 +46,7 @@
 #include <asm/apicdef.h>
 #include <asm/system.h>
 
-#ifdef CONFIG_X86_32
-# include <mach_ipi.h>
-#else
-# include <asm/mach_apic.h>
-#endif
+#include <mach_ipi.h>
 
 /*
  * Put the error code here just in case the user cares:
arch/x86/kernel/nmi_32.c
@@ -321,7 +321,8 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
 
 extern void die_nmi(struct pt_regs *, const char *msg);
 
-__kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
+notrace __kprobes int
+nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 {
 
 	/*
arch/x86/kernel/nmi_64.c
@@ -313,7 +313,8 @@ void touch_nmi_watchdog(void)
 }
 EXPORT_SYMBOL(touch_nmi_watchdog);
 
-int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
+notrace __kprobes int
+nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 {
 	int sum;
 	int touched = 0;
@@ -384,7 +385,8 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 
 static unsigned ignore_nmis;
 
-asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
+asmlinkage notrace __kprobes void
+do_nmi(struct pt_regs *regs, long error_code)
 {
 	nmi_enter();
 	add_pda(__nmi_count,1);
arch/x86/kernel/pci-calgary_64.c
@@ -470,10 +470,11 @@ error:
 	return 0;
 }
 
-static dma_addr_t calgary_map_single(struct device *dev, void *vaddr,
+static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
 	size_t size, int direction)
 {
 	dma_addr_t dma_handle = bad_dma_address;
+	void *vaddr = phys_to_virt(paddr);
 	unsigned long uaddr;
 	unsigned int npages;
 	struct iommu_table *tbl = find_iommu_table(dev);
@@ -1,225 +1,133 @@
-/*
- * Dynamic DMA mapping support.
- */
+#include <linux/dma-mapping.h>
+#include <linux/dmar.h>
+#include <linux/bootmem.h>
+#include <linux/pci.h>
 
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/module.h>
-#include <linux/dmar.h>
-#include <asm/io.h>
+#include <asm/proto.h>
+#include <asm/dma.h>
 #include <asm/gart.h>
 #include <asm/calgary.h>
 
+int forbid_dac __read_mostly;
+EXPORT_SYMBOL(forbid_dac);
+
+const struct dma_mapping_ops *dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
+int iommu_sac_force __read_mostly = 0;
+
+#ifdef CONFIG_IOMMU_DEBUG
+int panic_on_overflow __read_mostly = 1;
+int force_iommu __read_mostly = 1;
+#else
+int panic_on_overflow __read_mostly = 0;
+int force_iommu __read_mostly = 0;
+#endif
+
 int iommu_merge __read_mostly = 0;
 
-dma_addr_t bad_dma_address __read_mostly;
-EXPORT_SYMBOL(bad_dma_address);
+int no_iommu __read_mostly;
+/* Set this to 1 if there is a HW IOMMU in the system */
+int iommu_detected __read_mostly = 0;
 
 /* This tells the BIO block layer to assume merging. Default to off
    because we cannot guarantee merging later. */
 int iommu_bio_merge __read_mostly = 0;
 EXPORT_SYMBOL(iommu_bio_merge);
 
-static int iommu_sac_force __read_mostly = 0;
-int no_iommu __read_mostly;
-#ifdef CONFIG_IOMMU_DEBUG
-int panic_on_overflow __read_mostly = 1;
-int force_iommu __read_mostly = 1;
-#else
-int panic_on_overflow __read_mostly = 0;
-int force_iommu __read_mostly= 0;
-#endif
-
-/* Set this to 1 if there is a HW IOMMU in the system */
-int iommu_detected __read_mostly = 0;
+dma_addr_t bad_dma_address __read_mostly = 0;
+EXPORT_SYMBOL(bad_dma_address);
 
 /* Dummy device used for NULL arguments (normally ISA). Better would
    be probably a smaller DMA mask, but this is bug-to-bug compatible
-   to i386. */
+   to older i386. */
 struct device fallback_dev = {
 	.bus_id = "fallback device",
 	.coherent_dma_mask = DMA_32BIT_MASK,
 	.dma_mask = &fallback_dev.coherent_dma_mask,
 };
 
-/* Allocate DMA memory on node near device */
-noinline static void *
-dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
-{
-	struct page *page;
-	int node;
-
-	node = dev_to_node(dev);
-
-	page = alloc_pages_node(node, gfp, order);
-	return page ? page_address(page) : NULL;
-}
-
-/*
- * Allocate memory for a coherent mapping.
- */
-void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t gfp)
-{
-	void *memory;
-	unsigned long dma_mask = 0;
-	u64 bus;
-
-	if (!dev)
-		dev = &fallback_dev;
-	dma_mask = dev->coherent_dma_mask;
-	if (dma_mask == 0)
-		dma_mask = DMA_32BIT_MASK;
-
-	/* Device not DMA able */
-	if (dev->dma_mask == NULL)
-		return NULL;
-
-	/* Don't invoke OOM killer */
-	gfp |= __GFP_NORETRY;
-
-	/* Kludge to make it bug-to-bug compatible with i386. i386
-	   uses the normal dma_mask for alloc_coherent. */
-	dma_mask &= *dev->dma_mask;
-
-	/* Why <=? Even when the mask is smaller than 4GB it is often
-	   larger than 16MB and in this case we have a chance of
-	   finding fitting memory in the next higher zone first. If
-	   not retry with true GFP_DMA. -AK */
-	if (dma_mask <= DMA_32BIT_MASK)
-		gfp |= GFP_DMA32;
-
- again:
-	memory = dma_alloc_pages(dev, gfp, get_order(size));
-	if (memory == NULL)
-		return NULL;
-
-	{
-		int high, mmu;
-		bus = virt_to_bus(memory);
-		high = (bus + size) >= dma_mask;
-		mmu = high;
-		if (force_iommu && !(gfp & GFP_DMA))
-			mmu = 1;
-		else if (high) {
-			free_pages((unsigned long)memory,
-				   get_order(size));
-
-			/* Don't use the 16MB ZONE_DMA unless absolutely
-			   needed. It's better to use remapping first. */
-			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
-				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
-				goto again;
-			}
-
-			/* Let low level make its own zone decisions */
-			gfp &= ~(GFP_DMA32|GFP_DMA);
-
-			if (dma_ops->alloc_coherent)
-				return dma_ops->alloc_coherent(dev, size,
-							   dma_handle, gfp);
-			return NULL;
-		}
-
-		memset(memory, 0, size);
-		if (!mmu) {
-			*dma_handle = virt_to_bus(memory);
-			return memory;
-		}
-	}
-
-	if (dma_ops->alloc_coherent) {
-		free_pages((unsigned long)memory, get_order(size));
-		gfp &= ~(GFP_DMA|GFP_DMA32);
-		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
-	}
-
-	if (dma_ops->map_simple) {
-		*dma_handle = dma_ops->map_simple(dev, memory,
-					      size,
-					      PCI_DMA_BIDIRECTIONAL);
-		if (*dma_handle != bad_dma_address)
-			return memory;
-	}
-
-	if (panic_on_overflow)
-		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",size);
-	free_pages((unsigned long)memory, get_order(size));
-	return NULL;
-}
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-/*
- * Unmap coherent memory.
- * The caller must ensure that the device has finished accessing the mapping.
- */
-void dma_free_coherent(struct device *dev, size_t size,
-			 void *vaddr, dma_addr_t bus)
-{
-	WARN_ON(irqs_disabled());	/* for portability */
-	if (dma_ops->unmap_single)
-		dma_ops->unmap_single(dev, bus, size, 0);
-	free_pages((unsigned long)vaddr, get_order(size));
-}
-EXPORT_SYMBOL(dma_free_coherent);
-
-static int forbid_dac __read_mostly;
-
-int dma_supported(struct device *dev, u64 mask)
-{
-#ifdef CONFIG_PCI
-	if (mask > 0xffffffff && forbid_dac > 0) {
-
-
-
-		printk(KERN_INFO "PCI: Disallowing DAC for device %s\n", dev->bus_id);
-		return 0;
-	}
-#endif
-
-	if (dma_ops->dma_supported)
-		return dma_ops->dma_supported(dev, mask);
-
-	/* Copied from i386. Doesn't make much sense, because it will
-	   only work for pci_alloc_coherent.
-	   The caller just has to use GFP_DMA in this case. */
-	if (mask < DMA_24BIT_MASK)
-		return 0;
-
-	/* Tell the device to use SAC when IOMMU force is on.  This
-	   allows the driver to use cheaper accesses in some cases.
-
-	   Problem with this is that if we overflow the IOMMU area and
-	   return DAC as fallback address the device may not handle it
-	   correctly.
-
-	   As a special case some controllers have a 39bit address
-	   mode that is as efficient as 32bit (aic79xx). Don't force
-	   SAC for these.  Assume all masks <= 40 bits are of this
-	   type. Normally this doesn't make any difference, but gives
-	   more gentle handling of IOMMU overflow. */
-	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
-		printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id,mask);
-		return 0;
-	}
-
-	return 1;
-}
-EXPORT_SYMBOL(dma_supported);
-
 int dma_set_mask(struct device *dev, u64 mask)
 {
 	if (!dev->dma_mask || !dma_supported(dev, mask))
 		return -EIO;
 
 	*dev->dma_mask = mask;
 
 	return 0;
 }
 EXPORT_SYMBOL(dma_set_mask);
 
+#ifdef CONFIG_X86_64
+static __initdata void *dma32_bootmem_ptr;
+static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);
+
+static int __init parse_dma32_size_opt(char *p)
+{
+	if (!p)
+		return -EINVAL;
+	dma32_bootmem_size = memparse(p, &p);
+	return 0;
+}
+early_param("dma32_size", parse_dma32_size_opt);
+
+void __init dma32_reserve_bootmem(void)
+{
+	unsigned long size, align;
+	if (end_pfn <= MAX_DMA32_PFN)
+		return;
+
+	align = 64ULL<<20;
+	size = round_up(dma32_bootmem_size, align);
+	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
+				 __pa(MAX_DMA_ADDRESS));
+	if (dma32_bootmem_ptr)
+		dma32_bootmem_size = size;
+	else
+		dma32_bootmem_size = 0;
+}
+static void __init dma32_free_bootmem(void)
+{
+	int node;
+
+	if (end_pfn <= MAX_DMA32_PFN)
+		return;
+
+	if (!dma32_bootmem_ptr)
+		return;
+
+	for_each_online_node(node)
+		free_bootmem_node(NODE_DATA(node), __pa(dma32_bootmem_ptr),
+				  dma32_bootmem_size);
+
+	dma32_bootmem_ptr = NULL;
+	dma32_bootmem_size = 0;
+}
+
+void __init pci_iommu_alloc(void)
+{
+	/* free the range so iommu could get some range less than 4G */
+	dma32_free_bootmem();
+	/*
+	 * The order of these functions is important for
+	 * fall-back/fail-over reasons
+	 */
+#ifdef CONFIG_GART_IOMMU
+	gart_iommu_hole_init();
+#endif
+
+#ifdef CONFIG_CALGARY_IOMMU
+	detect_calgary();
+#endif
+
+	detect_intel_iommu();
+
+#ifdef CONFIG_SWIOTLB
+	pci_swiotlb_init();
+#endif
+}
+#endif
+
 /*
  * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
  * documentation.
@@ -263,7 +171,10 @@ static __init int iommu_setup(char *p)
 		forbid_dac = 0;
 	if (!strncmp(p, "nodac", 5))
 		forbid_dac = -1;
+	if (!strncmp(p, "usedac", 6)) {
+		forbid_dac = -1;
+		return 1;
+	}
 #ifdef CONFIG_SWIOTLB
 	if (!strncmp(p, "soft", 4))
 		swiotlb = 1;
@@ -286,26 +197,294 @@ static __init int iommu_setup(char *p)
 }
 early_param("iommu", iommu_setup);
 
-void __init pci_iommu_alloc(void)
+#ifdef CONFIG_X86_32
+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+				dma_addr_t device_addr, size_t size, int flags)
 {
-	/*
-	 * The order of these functions is important for
-	 * fall-back/fail-over reasons
-	 */
-#ifdef CONFIG_GART_IOMMU
-	gart_iommu_hole_init();
-#endif
+	void __iomem *mem_base = NULL;
+	int pages = size >> PAGE_SHIFT;
+	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
 
-#ifdef CONFIG_CALGARY_IOMMU
-	detect_calgary();
-#endif
+	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
+		goto out;
+	if (!size)
+		goto out;
+	if (dev->dma_mem)
+		goto out;
 
-	detect_intel_iommu();
+	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
 
-#ifdef CONFIG_SWIOTLB
-	pci_swiotlb_init();
-#endif
+	mem_base = ioremap(bus_addr, size);
+	if (!mem_base)
+		goto out;
+
+	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+	if (!dev->dma_mem)
+		goto out;
+	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!dev->dma_mem->bitmap)
+		goto free1_out;
+
+	dev->dma_mem->virt_base = mem_base;
+	dev->dma_mem->device_base = device_addr;
+	dev->dma_mem->size = pages;
+	dev->dma_mem->flags = flags;
+
+	if (flags & DMA_MEMORY_MAP)
+		return DMA_MEMORY_MAP;
+
+	return DMA_MEMORY_IO;
+
+ free1_out:
+	kfree(dev->dma_mem);
+ out:
+	if (mem_base)
+		iounmap(mem_base);
+	return 0;
 }
+EXPORT_SYMBOL(dma_declare_coherent_memory);
+
+void dma_release_declared_memory(struct device *dev)
+{
+	struct dma_coherent_mem *mem = dev->dma_mem;
+
+	if (!mem)
+		return;
+	dev->dma_mem = NULL;
+	iounmap(mem->virt_base);
+	kfree(mem->bitmap);
+	kfree(mem);
+}
+EXPORT_SYMBOL(dma_release_declared_memory);
+
+void *dma_mark_declared_memory_occupied(struct device *dev,
+					dma_addr_t device_addr, size_t size)
+{
+	struct dma_coherent_mem *mem = dev->dma_mem;
+	int pos, err;
+	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);
+
+	pages >>= PAGE_SHIFT;
+
+	if (!mem)
+		return ERR_PTR(-EINVAL);
+
+	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
+	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
+	if (err != 0)
+		return ERR_PTR(err);
+	return mem->virt_base + (pos << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+
+static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
+				       dma_addr_t *dma_handle, void **ret)
+{
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+	int order = get_order(size);
+
+	if (mem) {
+		int page = bitmap_find_free_region(mem->bitmap, mem->size,
+						     order);
+		if (page >= 0) {
+			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
+			*ret = mem->virt_base + (page << PAGE_SHIFT);
+			memset(*ret, 0, size);
+		}
+		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
+			*ret = NULL;
+	}
+	return (mem != NULL);
+}
+
+static int dma_release_coherent(struct device *dev, int order, void *vaddr)
+{
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+	if (mem && vaddr >= mem->virt_base && vaddr <
+		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+
+		bitmap_release_region(mem->bitmap, page, order);
+		return 1;
+	}
+	return 0;
+}
+#else
+#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
+#define dma_release_coherent(dev, order, vaddr) (0)
+#endif /* CONFIG_X86_32 */
+
+int dma_supported(struct device *dev, u64 mask)
+{
+#ifdef CONFIG_PCI
+	if (mask > 0xffffffff && forbid_dac > 0) {
+		printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
+				 dev->bus_id);
+		return 0;
+	}
+#endif
+
+	if (dma_ops->dma_supported)
+		return dma_ops->dma_supported(dev, mask);
+
+	/* Copied from i386. Doesn't make much sense, because it will
+	   only work for pci_alloc_coherent.
+	   The caller just has to use GFP_DMA in this case. */
+	if (mask < DMA_24BIT_MASK)
+		return 0;
+
+	/* Tell the device to use SAC when IOMMU force is on.  This
+	   allows the driver to use cheaper accesses in some cases.
+
+	   Problem with this is that if we overflow the IOMMU area and
+	   return DAC as fallback address the device may not handle it
+	   correctly.
+
+	   As a special case some controllers have a 39bit address
+	   mode that is as efficient as 32bit (aic79xx). Don't force
+	   SAC for these.  Assume all masks <= 40 bits are of this
+	   type. Normally this doesn't make any difference, but gives
+	   more gentle handling of IOMMU overflow. */
+	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
+		printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
+				 dev->bus_id, mask);
+		return 0;
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL(dma_supported);
+
+/* Allocate DMA memory on node near device */
+noinline struct page *
+dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
+{
+	int node;
+
+	node = dev_to_node(dev);
+
+	return alloc_pages_node(node, gfp, order);
+}
+
+/*
+ * Allocate memory for a coherent mapping.
+ */
+void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		   gfp_t gfp)
+{
+	void *memory = NULL;
+	struct page *page;
+	unsigned long dma_mask = 0;
+	dma_addr_t bus;
+
+	/* ignore region specifiers */
+	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+
+	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
+		return memory;
+
+	if (!dev)
+		dev = &fallback_dev;
+	dma_mask = dev->coherent_dma_mask;
+	if (dma_mask == 0)
+		dma_mask = DMA_32BIT_MASK;
+
+	/* Device not DMA able */
+	if (dev->dma_mask == NULL)
+		return NULL;
+
+	/* Don't invoke OOM killer */
+	gfp |= __GFP_NORETRY;
+
+#ifdef CONFIG_X86_64
+	/* Why <=? Even when the mask is smaller than 4GB it is often
+	   larger than 16MB and in this case we have a chance of
+	   finding fitting memory in the next higher zone first. If
+	   not retry with true GFP_DMA. -AK */
+	if (dma_mask <= DMA_32BIT_MASK)
+		gfp |= GFP_DMA32;
+#endif
+
+ again:
+	page = dma_alloc_pages(dev, gfp, get_order(size));
+	if (page == NULL)
+		return NULL;
+
+	{
+		int high, mmu;
+		bus = page_to_phys(page);
+		memory = page_address(page);
+		high = (bus + size) >= dma_mask;
+		mmu = high;
+		if (force_iommu && !(gfp & GFP_DMA))
+			mmu = 1;
+		else if (high) {
+			free_pages((unsigned long)memory,
+				   get_order(size));
+
+			/* Don't use the 16MB ZONE_DMA unless absolutely
+			   needed. It's better to use remapping first. */
+			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
+				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
+				goto again;
+			}
+
+			/* Let low level make its own zone decisions */
+			gfp &= ~(GFP_DMA32|GFP_DMA);
+
+			if (dma_ops->alloc_coherent)
+				return dma_ops->alloc_coherent(dev, size,
+							   dma_handle, gfp);
+			return NULL;
+		}
+
+		memset(memory, 0, size);
+		if (!mmu) {
+			*dma_handle = bus;
+			return memory;
+		}
+	}
+
+	if (dma_ops->alloc_coherent) {
+		free_pages((unsigned long)memory, get_order(size));
+		gfp &= ~(GFP_DMA|GFP_DMA32);
+		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
+	}
+
+	if (dma_ops->map_simple) {
+		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
+					      size,
+					      PCI_DMA_BIDIRECTIONAL);
+		if (*dma_handle != bad_dma_address)
+			return memory;
+	}
+
+	if (panic_on_overflow)
+		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
+		      (unsigned long)size);
+	free_pages((unsigned long)memory, get_order(size));
+	return NULL;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+/*
+ * Unmap coherent memory.
+ * The caller must ensure that the device has finished accessing the mapping.
+ */
+void dma_free_coherent(struct device *dev, size_t size,
+			 void *vaddr, dma_addr_t bus)
+{
+	int order = get_order(size);
+	WARN_ON(irqs_disabled());	/* for portability */
+	if (dma_release_coherent(dev, order, vaddr))
+		return;
+	if (dma_ops->unmap_single)
+		dma_ops->unmap_single(dev, bus, size, 0);
+	free_pages((unsigned long)vaddr, order);
+}
+EXPORT_SYMBOL(dma_free_coherent);
 
 static int __init pci_iommu_init(void)
 {
@@ -327,6 +506,8 @@ void pci_iommu_shutdown(void)
 {
 	gart_iommu_shutdown();
 }
+/* Must execute after PCI subsystem */
+fs_initcall(pci_iommu_init);
 
 #ifdef CONFIG_PCI
 /* Many VIA bridges seem to corrupt data for DAC. Disable it here */
@@ -334,11 +515,10 @@ void pci_iommu_shutdown(void)
 static __devinit void via_no_dac(struct pci_dev *dev)
 {
 	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
-		printk(KERN_INFO "PCI: VIA PCI bridge detected. Disabling DAC.\n");
+		printk(KERN_INFO "PCI: VIA PCI bridge detected."
+				 "Disabling DAC.\n");
 		forbid_dac = 1;
 	}
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
 #endif
-/* Must execute after PCI subsystem */
-fs_initcall(pci_iommu_init);
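The X86_32 declared-memory helpers consolidated above keep their existing driver-facing semantics. A hedged usage sketch; the device window address and size below are invented for illustration:

/* hypothetical on-device SRAM window, mapped 1:1 for CPU and device */
#define EX_SRAM_BUS	0xfe000000
#define EX_SRAM_SIZE	0x4000

static int example_setup(struct pci_dev *pdev)
{
	dma_addr_t handle;
	void *buf;

	if (dma_declare_coherent_memory(&pdev->dev, EX_SRAM_BUS, EX_SRAM_BUS,
					EX_SRAM_SIZE, DMA_MEMORY_MAP)
	    != DMA_MEMORY_MAP)
		return -ENOMEM;

	/* dma_alloc_coherent() is now satisfied from the declared window
	   via dma_alloc_from_coherent_mem() above */
	buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &handle, GFP_KERNEL);
	return buf ? 0 : -ENOMEM;
}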
@@ -1,177 +0,0 @@
-/*
- * Dynamic DMA mapping support.
- *
- * On i386 there is no hardware dynamic DMA address translation,
- * so consistent alloc/free are merely page allocation/freeing.
- * The rest of the dynamic DMA mapping interface is implemented
- * in asm/pci.h.
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/module.h>
-#include <asm/io.h>
-
-struct dma_coherent_mem {
-	void		*virt_base;
-	u32		device_base;
-	int		size;
-	int		flags;
-	unsigned long	*bitmap;
-};
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t gfp)
-{
-	void *ret;
-	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-	int order = get_order(size);
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-	if (mem) {
-		int page = bitmap_find_free_region(mem->bitmap, mem->size,
-						     order);
-		if (page >= 0) {
-			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
-			ret = mem->virt_base + (page << PAGE_SHIFT);
-			memset(ret, 0, size);
-			return ret;
-		}
-		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
-			return NULL;
-	}
-
-	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-		gfp |= GFP_DMA;
-
-	ret = (void *)__get_free_pages(gfp, order);
-
-	if (ret != NULL) {
-		memset(ret, 0, size);
-		*dma_handle = virt_to_phys(ret);
-	}
-	return ret;
-}
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-void dma_free_coherent(struct device *dev, size_t size,
-			 void *vaddr, dma_addr_t dma_handle)
-{
-	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-	int order = get_order(size);
-
-	WARN_ON(irqs_disabled());	/* for portability */
-	if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-
-		bitmap_release_region(mem->bitmap, page, order);
-	} else
-		free_pages((unsigned long)vaddr, order);
-}
-EXPORT_SYMBOL(dma_free_coherent);
-
-int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-				dma_addr_t device_addr, size_t size, int flags)
-{
-	void __iomem *mem_base = NULL;
-	int pages = size >> PAGE_SHIFT;
-	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
-
-	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
-		goto out;
-	if (!size)
-		goto out;
-	if (dev->dma_mem)
-		goto out;
-
-	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
-
-	mem_base = ioremap(bus_addr, size);
-	if (!mem_base)
-		goto out;
-
-	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
-	if (!dev->dma_mem)
-		goto out;
-	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!dev->dma_mem->bitmap)
-		goto free1_out;
-
-	dev->dma_mem->virt_base = mem_base;
-	dev->dma_mem->device_base = device_addr;
-	dev->dma_mem->size = pages;
-	dev->dma_mem->flags = flags;
-
-	if (flags & DMA_MEMORY_MAP)
-		return DMA_MEMORY_MAP;
-
-	return DMA_MEMORY_IO;
-
- free1_out:
-	kfree(dev->dma_mem);
- out:
-	if (mem_base)
-		iounmap(mem_base);
-	return 0;
-}
-EXPORT_SYMBOL(dma_declare_coherent_memory);
-
-void dma_release_declared_memory(struct device *dev)
-{
-	struct dma_coherent_mem *mem = dev->dma_mem;
-
-	if(!mem)
-		return;
-	dev->dma_mem = NULL;
-	iounmap(mem->virt_base);
-	kfree(mem->bitmap);
-	kfree(mem);
-}
-EXPORT_SYMBOL(dma_release_declared_memory);
-
-void *dma_mark_declared_memory_occupied(struct device *dev,
-				dma_addr_t device_addr, size_t size)
-{
-	struct dma_coherent_mem *mem = dev->dma_mem;
-	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	int pos, err;
-
-	if (!mem)
-		return ERR_PTR(-EINVAL);
-
-	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
-	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
-	if (err != 0)
-		return ERR_PTR(err);
-	return mem->virt_base + (pos << PAGE_SHIFT);
-}
-EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-
-#ifdef CONFIG_PCI
-/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
-
-int forbid_dac;
-EXPORT_SYMBOL(forbid_dac);
-
-static __devinit void via_no_dac(struct pci_dev *dev)
-{
-	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
-		printk(KERN_INFO "PCI: VIA PCI bridge detected. Disabling DAC.\n");
-		forbid_dac = 1;
-	}
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
-
-static int check_iommu(char *s)
-{
-	if (!strcmp(s, "usedac")) {
-		forbid_dac = -1;
-		return 1;
-	}
-	return 0;
-}
-__setup("iommu=", check_iommu);
-#endif
@@ -264,9 +264,9 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 }
 
 static dma_addr_t
-gart_map_simple(struct device *dev, char *buf, size_t size, int dir)
+gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
 {
-	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
+	dma_addr_t map = dma_map_area(dev, paddr, size, dir);
 
 	flush_gart();
 
@@ -275,18 +275,17 @@ gart_map_simple(struct device *dev, char *buf, size_t size, int dir)
 
 /* Map a single area into the IOMMU */
 static dma_addr_t
-gart_map_single(struct device *dev, void *addr, size_t size, int dir)
+gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
 {
-	unsigned long phys_mem, bus;
+	unsigned long bus;
 
 	if (!dev)
 		dev = &fallback_dev;
 
-	phys_mem = virt_to_phys(addr);
-	if (!need_iommu(dev, phys_mem, size))
-		return phys_mem;
+	if (!need_iommu(dev, paddr, size))
+		return paddr;
 
-	bus = gart_map_simple(dev, addr, size, dir);
+	bus = gart_map_simple(dev, paddr, size, dir);
 
 	return bus;
 }
@@ -14,7 +14,7 @@
 static int
 check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
 {
 	if (hwdev && bus + size > *hwdev->dma_mask) {
 		if (*hwdev->dma_mask >= DMA_32BIT_MASK)
 			printk(KERN_ERR
 			    "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
@@ -26,19 +26,17 @@ check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
 }
 
 static dma_addr_t
-nommu_map_single(struct device *hwdev, void *ptr, size_t size,
+nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size,
 	       int direction)
 {
-	dma_addr_t bus = virt_to_bus(ptr);
+	dma_addr_t bus = paddr;
+	WARN_ON(size == 0);
 	if (!check_addr("map_single", hwdev, bus, size))
 		return bad_dma_address;
+	flush_write_buffers();
 	return bus;
 }
 
-static void nommu_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
-			int direction)
-{
-}
-
 /* Map a set of buffers described by scatterlist in streaming
  * mode for DMA.  This is the scatter-gather version of the
@@ -61,30 +59,34 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
 	struct scatterlist *s;
 	int i;
 
+	WARN_ON(nents == 0 || sg[0].length == 0);
+
 	for_each_sg(sg, s, nents, i) {
 		BUG_ON(!sg_page(s));
-		s->dma_address = virt_to_bus(sg_virt(s));
+		s->dma_address = sg_phys(s);
 		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
 			return 0;
 		s->dma_length = s->length;
 	}
+	flush_write_buffers();
 	return nents;
 }
 
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-static void nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
-		  int nents, int dir)
+/* Make sure we keep the same behaviour */
+static int nommu_mapping_error(dma_addr_t dma_addr)
 {
+#ifdef CONFIG_X86_32
+	return 0;
+#else
+	return (dma_addr == bad_dma_address);
+#endif
 }
 
-
 const struct dma_mapping_ops nommu_dma_ops = {
 	.map_single = nommu_map_single,
-	.unmap_single = nommu_unmap_single,
 	.map_sg = nommu_map_sg,
-	.unmap_sg = nommu_unmap_sg,
+	.mapping_error = nommu_mapping_error,
 	.is_phys = 1,
 };
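In exchange for dropping the no-op unmap hooks, the nommu ops gain .mapping_error, which is what the generic dma_mapping_error() check dispatches to. A sketch of the driver-side pattern it serves (at this point in history x86's dma_mapping_error() took just the address):

	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(bus))	/* ends up in nommu_mapping_error() here */
		return -EIO;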
@@ -11,11 +11,18 @@
 
 int swiotlb __read_mostly;
 
+static dma_addr_t
+swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
+			int direction)
+{
+	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
+}
+
 const struct dma_mapping_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 	.alloc_coherent = swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
-	.map_single = swiotlb_map_single,
+	.map_single = swiotlb_map_single_phys,
 	.unmap_single = swiotlb_unmap_single,
 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = swiotlb_sync_single_for_device,
arch/x86/kernel/process.c (new file, 44 lines)
@@ -0,0 +1,44 @@
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+struct kmem_cache *task_xstate_cachep;
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+	*dst = *src;
+	if (src->thread.xstate) {
+		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+						      GFP_KERNEL);
+		if (!dst->thread.xstate)
+			return -ENOMEM;
+		WARN_ON((unsigned long)dst->thread.xstate & 15);
+		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+	}
+	return 0;
+}
+
+void free_thread_xstate(struct task_struct *tsk)
+{
+	if (tsk->thread.xstate) {
+		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
+		tsk->thread.xstate = NULL;
+	}
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+	free_thread_xstate(ti->task);
+	free_pages((unsigned long)ti, get_order(THREAD_SIZE));
+}
+
+void arch_task_cache_init(void)
+{
+	task_xstate_cachep =
+		kmem_cache_create("task_xstate", xstate_size,
+				  __alignof__(union thread_xstate),
+				  SLAB_PANIC, NULL);
+}
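The WARN_ON in arch_dup_task_struct() enforces the fxsave/fxrstor requirement that the state image be 16-byte aligned, which kmem_cache_create() guarantees via the __alignof__ argument. A compile-time variant one could place in arch_task_cache_init() (hypothetical, not part of this commit):

	/* hypothetical compile-time twin of the runtime WARN_ON above:
	   union thread_xstate embeds the fxsave image, so its alignment
	   must be a multiple of 16 */
	BUILD_BUG_ON(__alignof__(union thread_xstate) & 15);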
@@ -36,6 +36,7 @@
 #include <linux/personality.h>
 #include <linux/tick.h>
 #include <linux/percpu.h>
+#include <linux/prctl.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -45,7 +46,6 @@
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/desc.h>
-#include <asm/vm86.h>
 #ifdef CONFIG_MATH_EMULATION
 #include <asm/math_emu.h>
 #endif
@@ -521,14 +521,18 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 	regs->cs = __USER_CS;
 	regs->ip = new_ip;
 	regs->sp = new_sp;
+	/*
+	 * Free the old FP and other extended state
+	 */
+	free_thread_xstate(current);
 }
 EXPORT_SYMBOL_GPL(start_thread);
 
-#ifdef CONFIG_SECCOMP
 static void hard_disable_TSC(void)
 {
 	write_cr4(read_cr4() | X86_CR4_TSD);
 }
 
 void disable_TSC(void)
 {
 	preempt_disable();
@@ -540,11 +544,47 @@ void disable_TSC(void)
 		hard_disable_TSC();
 	preempt_enable();
 }
 
 static void hard_enable_TSC(void)
 {
 	write_cr4(read_cr4() & ~X86_CR4_TSD);
 }
-#endif /* CONFIG_SECCOMP */
+
+void enable_TSC(void)
+{
+	preempt_disable();
+	if (test_and_clear_thread_flag(TIF_NOTSC))
+		/*
+		 * Must flip the CPU state synchronously with
+		 * TIF_NOTSC in the current running context.
+		 */
+		hard_enable_TSC();
+	preempt_enable();
+}
+
+int get_tsc_mode(unsigned long adr)
+{
+	unsigned int val;
+
+	if (test_thread_flag(TIF_NOTSC))
+		val = PR_TSC_SIGSEGV;
+	else
+		val = PR_TSC_ENABLE;
+
+	return put_user(val, (unsigned int __user *)adr);
+}
+
+int set_tsc_mode(unsigned int val)
+{
+	if (val == PR_TSC_SIGSEGV)
+		disable_TSC();
+	else if (val == PR_TSC_ENABLE)
+		enable_TSC();
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
 static noinline void
 __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
@@ -578,7 +618,6 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		set_debugreg(next->debugreg7, 7);
 	}
 
-#ifdef CONFIG_SECCOMP
 	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
 	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
 		/* prev and next are different */
@@ -587,7 +626,6 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		else
 			hard_enable_TSC();
 	}
-#endif
 
 #ifdef X86_BTS
 	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
@@ -669,7 +707,7 @@ struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct
 
 	/* we're going to use this soon, after a few expensive things */
 	if (next_p->fpu_counter > 5)
-		prefetch(&next->i387.fxsave);
+		prefetch(next->xstate);
 
 	/*
 	 * Reload esp0.
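get_tsc_mode()/set_tsc_mode() above are the arch backends for the new PR_GET_TSC/PR_SET_TSC prctl pair (the generic sys_prctl plumbing lands elsewhere in this series). A minimal userspace sketch, assuming the constants from <linux/prctl.h>:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	int mode;

	if (prctl(PR_GET_TSC, &mode) == 0)
		printf("rdtsc is %s\n",
		       mode == PR_TSC_ENABLE ? "allowed" : "trapping");

	/* after this, rdtsc in this task raises SIGSEGV */
	return prctl(PR_SET_TSC, PR_TSC_SIGSEGV);
}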
@@ -36,6 +36,7 @@
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 #include <linux/tick.h>
+#include <linux/prctl.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -532,9 +533,71 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 	regs->ss = __USER_DS;
 	regs->flags = 0x200;
 	set_fs(USER_DS);
+	/*
+	 * Free the old FP and other extended state
+	 */
+	free_thread_xstate(current);
 }
 EXPORT_SYMBOL_GPL(start_thread);
 
+static void hard_disable_TSC(void)
+{
+	write_cr4(read_cr4() | X86_CR4_TSD);
+}
+
+void disable_TSC(void)
+{
+	preempt_disable();
+	if (!test_and_set_thread_flag(TIF_NOTSC))
+		/*
+		 * Must flip the CPU state synchronously with
+		 * TIF_NOTSC in the current running context.
+		 */
+		hard_disable_TSC();
+	preempt_enable();
+}
+
+static void hard_enable_TSC(void)
+{
+	write_cr4(read_cr4() & ~X86_CR4_TSD);
+}
+
+void enable_TSC(void)
+{
+	preempt_disable();
+	if (test_and_clear_thread_flag(TIF_NOTSC))
+		/*
+		 * Must flip the CPU state synchronously with
+		 * TIF_NOTSC in the current running context.
+		 */
+		hard_enable_TSC();
+	preempt_enable();
+}
+
+int get_tsc_mode(unsigned long adr)
+{
+	unsigned int val;
+
+	if (test_thread_flag(TIF_NOTSC))
+		val = PR_TSC_SIGSEGV;
+	else
+		val = PR_TSC_ENABLE;
+
+	return put_user(val, (unsigned int __user *)adr);
+}
+
+int set_tsc_mode(unsigned int val)
+{
+	if (val == PR_TSC_SIGSEGV)
+		disable_TSC();
+	else if (val == PR_TSC_ENABLE)
+		enable_TSC();
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
 /*
  * This special macro can be used to load a debugging register
  */
@@ -572,6 +635,15 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
 		loaddebug(next, 7);
 	}
 
+	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
+	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
+		/* prev and next are different */
+		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
+			hard_disable_TSC();
+		else
+			hard_enable_TSC();
+	}
+
 	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
 		/*
 		 * Copy the relevant range of the IO bitmap.
@@ -614,7 +686,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	/* we're going to use this soon, after a few expensive things */
 	if (next_p->fpu_counter>5)
-		prefetch(&next->i387.fxsave);
+		prefetch(next->xstate);
 
 	/*
 	 * Reload esp0, LDT and the page table pointer:
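The __switch_to_xtra() hunk is what makes the prctl stick across scheduling: CR4.TSD is flipped to match the incoming task's TIF_NOTSC flag, and with TSD set, rdtsc is privileged and faults at CPL 3. The faulting instruction, seen from userspace (illustration only):

	unsigned int lo, hi;

	/* raises SIGSEGV once the task has selected PR_TSC_SIGSEGV */
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));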
@@ -74,8 +74,8 @@ int force_personality32 = 0;
 Control non executable heap for 32bit processes.
 To control the stack too use noexec=off
 
-on	PROT_READ does not imply PROT_EXEC for 32bit processes
-off	PROT_READ implies PROT_EXEC (default)
+on	PROT_READ does not imply PROT_EXEC for 32bit processes (default)
+off	PROT_READ implies PROT_EXEC
 */
 static int __init nonx32_setup(char *str)
 {
@@ -812,10 +812,10 @@ void __init setup_arch(char **cmdline_p)
 	efi_init();
 
 	/* update e820 for memory not covered by WB MTRRs */
-	find_max_pfn();
+	propagate_e820_map();
 	mtrr_bp_init();
 	if (mtrr_trim_uncached_memory(max_pfn))
-		find_max_pfn();
+		propagate_e820_map();
 
 	max_low_pfn = setup_memory();
 
@@ -398,6 +398,8 @@ void __init setup_arch(char **cmdline_p)
 
 	early_res_to_bootmem();
 
+	dma32_reserve_bootmem();
+
 #ifdef CONFIG_ACPI_SLEEP
 	/*
 	 * Reserve low memory region for sleep support.
@@ -420,11 +422,14 @@ void __init setup_arch(char **cmdline_p)
 		unsigned long end_of_mem = end_pfn << PAGE_SHIFT;
 
 		if (ramdisk_end <= end_of_mem) {
-			reserve_bootmem_generic(ramdisk_image, ramdisk_size);
+			/*
+			 * don't need to reserve again, already reserved early
+			 * in x86_64_start_kernel, and early_res_to_bootmem
+			 * convert that to reserved in bootmem
+			 */
 			initrd_start = ramdisk_image + PAGE_OFFSET;
 			initrd_end = initrd_start+ramdisk_size;
 		} else {
-			/* Assumes everything on node 0 */
 			free_bootmem(ramdisk_image, ramdisk_size);
 			printk(KERN_ERR "initrd extends beyond end of memory "
 			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
@@ -61,6 +61,7 @@
 #include <asm/mtrr.h>
 #include <asm/nmi.h>
 #include <asm/vmi.h>
+#include <asm/genapic.h>
 #include <linux/mc146818rtc.h>
 
 #include <mach_apic.h>
@@ -677,6 +678,12 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
 	unsigned long send_status, accept_status = 0;
 	int maxlvt, num_starts, j;
 
+	if (get_uv_system_type() == UV_NON_UNIQUE_APIC) {
+		send_status = uv_wakeup_secondary(phys_apicid, start_eip);
+		atomic_set(&init_deasserted, 1);
+		return send_status;
+	}
+
 	/*
 	 * Be paranoid about clearing APIC errors.
 	 */
@@ -918,16 +925,19 @@ do_rest:
 
 	atomic_set(&init_deasserted, 0);
 
-	Dprintk("Setting warm reset code and vector.\n");
-
-	store_NMI_vector(&nmi_high, &nmi_low);
-
-	smpboot_setup_warm_reset_vector(start_ip);
-	/*
-	 * Be paranoid about clearing APIC errors.
-	 */
-	apic_write(APIC_ESR, 0);
-	apic_read(APIC_ESR);
+	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
+
+		Dprintk("Setting warm reset code and vector.\n");
+
+		store_NMI_vector(&nmi_high, &nmi_low);
+
+		smpboot_setup_warm_reset_vector(start_ip);
+		/*
+		 * Be paranoid about clearing APIC errors.
+		 */
+		apic_write(APIC_ESR, 0);
+		apic_read(APIC_ESR);
+	}
 
 	/*
 	 * Starting actual IPI sequence...
@@ -966,7 +976,8 @@ do_rest:
 		else
 			/* trampoline code not run */
 			printk(KERN_ERR "Not responding.\n");
-		inquire_remote_apic(apicid);
+		if (get_uv_system_type() != UV_NON_UNIQUE_APIC)
+			inquire_remote_apic(apicid);
 	}
 }
 
@@ -681,7 +681,7 @@ gp_in_kernel:
 	}
 }
 
-static __kprobes void
+static notrace __kprobes void
 mem_parity_error(unsigned char reason, struct pt_regs *regs)
 {
 	printk(KERN_EMERG
@@ -707,7 +707,7 @@ mem_parity_error(unsigned char reason, struct pt_regs *regs)
 	clear_mem_error(reason);
 }
 
-static __kprobes void
+static notrace __kprobes void
 io_check_error(unsigned char reason, struct pt_regs *regs)
 {
 	unsigned long i;
@@ -727,7 +727,7 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
 	outb(reason, 0x61);
 }
 
-static __kprobes void
+static notrace __kprobes void
 unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
 {
 	if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
@@ -755,7 +755,7 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
 
 static DEFINE_SPINLOCK(nmi_print_lock);
 
-void __kprobes die_nmi(struct pt_regs *regs, const char *msg)
+void notrace __kprobes die_nmi(struct pt_regs *regs, const char *msg)
 {
 	if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) == NOTIFY_STOP)
 		return;
@@ -786,7 +786,7 @@ void __kprobes die_nmi(struct pt_regs *regs, const char *msg)
 	do_exit(SIGSEGV);
 }
 
-static __kprobes void default_do_nmi(struct pt_regs *regs)
+static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
 {
 	unsigned char reason = 0;
 
@@ -828,7 +828,7 @@ static __kprobes void default_do_nmi(struct pt_regs *regs)
 
 static int ignore_nmis;
 
-__kprobes void do_nmi(struct pt_regs *regs, long error_code)
+notrace __kprobes void do_nmi(struct pt_regs *regs, long error_code)
 {
 	int cpu;
 
@@ -1148,9 +1148,22 @@ asmlinkage void math_state_restore(void)
 	struct thread_info *thread = current_thread_info();
 	struct task_struct *tsk = thread->task;
 
+	if (!tsk_used_math(tsk)) {
+		local_irq_enable();
+		/*
+		 * does a slab alloc which can sleep
+		 */
+		if (init_fpu(tsk)) {
+			/*
+			 * ran out of memory!
+			 */
+			do_group_exit(SIGKILL);
+			return;
+		}
+		local_irq_disable();
+	}
+
 	clts();				/* Allow maths ops (or we recurse) */
-	if (!tsk_used_math(tsk))
-		init_fpu(tsk);
 	restore_fpu(tsk);
 	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
 	tsk->fpu_counter++;
@@ -1208,11 +1221,6 @@ void __init trap_init(void)
 #endif
 	set_trap_gate(19, &simd_coprocessor_error);
 
-	/*
-	 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
-	 * Generate a build-time error if the alignment is wrong.
-	 */
-	BUILD_BUG_ON(offsetof(struct task_struct, thread.i387.fxsave) & 15);
 	if (cpu_has_fxsr) {
 		printk(KERN_INFO "Enabling fast FPU save and restore... ");
 		set_in_cr4(X86_CR4_OSFXSR);
@@ -1233,6 +1241,7 @@ void __init trap_init(void)
 
 	set_bit(SYSCALL_VECTOR, used_vectors);
 
+	init_thread_xstate();
 	/*
 	 * Should be a barrier for any external CPU state:
 	 */
@ -600,7 +600,8 @@ void die(const char * str, struct pt_regs * regs, long err)
|
||||||
oops_end(flags, regs, SIGSEGV);
|
oops_end(flags, regs, SIGSEGV);
|
||||||
}
|
}
|
||||||
|
|
||||||
void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
|
notrace __kprobes void
|
||||||
|
die_nmi(char *str, struct pt_regs *regs, int do_panic)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
|
@ -772,7 +773,7 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
|
||||||
die("general protection fault", regs, error_code);
|
die("general protection fault", regs, error_code);
|
||||||
}
|
}
|
||||||
|
|
||||||
static __kprobes void
|
static notrace __kprobes void
|
||||||
mem_parity_error(unsigned char reason, struct pt_regs * regs)
|
mem_parity_error(unsigned char reason, struct pt_regs * regs)
|
||||||
{
|
{
|
||||||
printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
|
printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
|
||||||
|
@@ -796,7 +797,7 @@ mem_parity_error(unsigned char reason, struct pt_regs * regs)
 	outb(reason, 0x61);
 }
 
-static __kprobes void
+static notrace __kprobes void
 io_check_error(unsigned char reason, struct pt_regs * regs)
 {
 	printk("NMI: IOCK error (debug interrupt?)\n");
@@ -810,7 +811,7 @@ io_check_error(unsigned char reason, struct pt_regs * regs)
 	outb(reason, 0x61);
 }
 
-static __kprobes void
+static notrace __kprobes void
 unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
 {
 	if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
@@ -827,7 +828,7 @@ unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
 
 /* Runs on IST stack. This code must keep interrupts off all the time.
    Nested NMIs are prevented by the CPU. */
-asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
+asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs)
 {
 	unsigned char reason = 0;
 	int cpu;
@@ -1123,11 +1124,24 @@ asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
 asmlinkage void math_state_restore(void)
 {
 	struct task_struct *me = current;
-	clts();			/* Allow maths ops (or we recurse) */
 
-	if (!used_math())
-		init_fpu(me);
-	restore_fpu_checking(&me->thread.i387.fxsave);
+	if (!used_math()) {
+		local_irq_enable();
+		/*
+		 * does a slab alloc which can sleep
+		 */
+		if (init_fpu(me)) {
+			/*
+			 * ran out of memory!
+			 */
+			do_group_exit(SIGKILL);
+			return;
+		}
+		local_irq_disable();
+	}
+
+	clts();			/* Allow maths ops (or we recurse) */
+	restore_fpu_checking(&me->thread.xstate->fxsave);
 	task_thread_info(me)->status |= TS_USEDFPU;
 	me->fpu_counter++;
 }
@@ -1162,6 +1176,10 @@ void __init trap_init(void)
 	set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
 #endif
 
+	/*
+	 * initialize the per thread extended state:
+	 */
+	init_thread_xstate();
 	/*
 	 * Should be a barrier for any external CPU state.
 	 */
@@ -221,9 +221,9 @@ EXPORT_SYMBOL(recalibrate_cpu_khz);
  * if the CPU frequency is scaled, TSC-based delays will need a different
  * loops_per_jiffy value to function properly.
  */
-static unsigned int ref_freq = 0;
-static unsigned long loops_per_jiffy_ref = 0;
-static unsigned long cpu_khz_ref = 0;
+static unsigned int ref_freq;
+static unsigned long loops_per_jiffy_ref;
+static unsigned long cpu_khz_ref;
 
 static int
 time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
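
The hunk above only drops the explicit "= 0" initializers: statics in C are zero-initialized by default, so the assignment is redundant and kernel style prefers the bare declaration (the object then lives in .bss rather than .data). A trivial sketch:

#include <stdio.h>

static unsigned int ref_a;	/* implicitly zero, placed in .bss */
static unsigned int ref_b = 0;	/* identical value; style tools flag the "= 0" */

int main(void)
{
	printf("%u %u\n", ref_a, ref_b);	/* prints "0 0" */
	return 0;
}
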
@@ -283,15 +283,28 @@ core_initcall(cpufreq_tsc);
 
 /* clock source code */
 
-static unsigned long current_tsc_khz = 0;
+static unsigned long current_tsc_khz;
+static struct clocksource clocksource_tsc;
+
+/*
+ * We compare the TSC to the cycle_last value in the clocksource
+ * structure to avoid a nasty time-warp issue. This can be observed in
+ * a very small window right after one CPU updated cycle_last under
+ * xtime lock and the other CPU reads a TSC value which is smaller
+ * than the cycle_last reference value due to a TSC which is slighty
+ * behind. This delta is nowhere else observable, but in that case it
+ * results in a forward time jump in the range of hours due to the
+ * unsigned delta calculation of the time keeping core code, which is
+ * necessary to support wrapping clocksources like pm timer.
+ */
 static cycle_t read_tsc(void)
 {
 	cycle_t ret;
 
 	rdtscll(ret);
 
-	return ret;
+	return ret >= clocksource_tsc.cycle_last ?
+			ret : clocksource_tsc.cycle_last;
 }
 
 static struct clocksource clocksource_tsc = {
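
The new return statement clamps the raw TSC read to the clocksource's last published cycle_last, so a CPU whose TSC is marginally behind can never hand the timekeeping core a value below the reference; the core's unsigned delta math would otherwise turn a tiny negative skew into a forward jump of hours. A hedged standalone sketch of the clamp (all names below are stand-ins, not the kernel API):

#include <stdint.h>
#include <stdio.h>

static uint64_t cycle_last;	/* stand-in for clocksource_tsc.cycle_last */

static uint64_t read_counter_raw(void)
{
	return cycle_last - 3;	/* simulate a TSC slightly behind */
}

static uint64_t read_counter(void)
{
	uint64_t ret = read_counter_raw();

	/* Never report a value below the published reference. */
	return ret >= cycle_last ? ret : cycle_last;
}

int main(void)
{
	cycle_last = 1000;
	printf("%llu\n", (unsigned long long)read_counter());	/* 1000, not 997 */
	return 0;
}
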
@@ -11,6 +11,7 @@
 #include <asm/hpet.h>
 #include <asm/timex.h>
 #include <asm/timer.h>
+#include <asm/vgtod.h>
 
 static int notsc __initdata = 0;
 
@@ -287,18 +288,34 @@ int __init notsc_setup(char *s)
 
 __setup("notsc", notsc_setup);
 
-/* clock source code: */
+static struct clocksource clocksource_tsc;
+
+/*
+ * We compare the TSC to the cycle_last value in the clocksource
+ * structure to avoid a nasty time-warp. This can be observed in a
+ * very small window right after one CPU updated cycle_last under
+ * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
+ * is smaller than the cycle_last reference value due to a TSC which
+ * is slighty behind. This delta is nowhere else observable, but in
+ * that case it results in a forward time jump in the range of hours
+ * due to the unsigned delta calculation of the time keeping core
+ * code, which is necessary to support wrapping clocksources like pm
+ * timer.
+ */
 static cycle_t read_tsc(void)
 {
 	cycle_t ret = (cycle_t)get_cycles();
-	return ret;
+
+	return ret >= clocksource_tsc.cycle_last ?
+		ret : clocksource_tsc.cycle_last;
 }
 
 static cycle_t __vsyscall_fn vread_tsc(void)
 {
 	cycle_t ret = (cycle_t)vget_cycles();
-	return ret;
+
+	return ret >= __vsyscall_gtod_data.clock.cycle_last ?
+		ret : __vsyscall_gtod_data.clock.cycle_last;
 }
 
 static struct clocksource clocksource_tsc = {
@@ -1,6 +1,4 @@
 /*
- *	linux/arch/i386/mach-visws/visws_apic.c
- *
  *	Copyright (C) 1999 Bent Hagemark, Ingo Molnar
  *
  *	SGI Visual Workstation interrupt controller
@@ -2,8 +2,6 @@
  *
  * Author: J.E.J.Bottomley@HansenPartnership.com
  *
- * linux/arch/i386/kernel/voyager.c
- *
  * This file contains all the voyager specific routines for getting
  * initialisation of the architecture to function.  For additional
  * features see:
@@ -4,8 +4,6 @@
  *
  * Author: J.E.J.Bottomley@HansenPartnership.com
  *
- * linux/arch/i386/kernel/voyager_cat.c
- *
  * This file contains all the logic for manipulating the CAT bus
  * in a level 5 machine.
  *
@@ -4,8 +4,6 @@
  *
  * Author: J.E.J.Bottomley@HansenPartnership.com
  *
- * linux/arch/i386/kernel/voyager_smp.c
- *
  * This file provides all the same external entries as smp.c but uses
  * the voyager hal to provide the functionality
  */
@@ -4,8 +4,6 @@
  *
  * Author: J.E.J.Bottomley@HansenPartnership.com
  *
- * linux/arch/i386/kernel/voyager_thread.c
- *
  * This module provides the machine status monitor thread for the
  * voyager architecture.  This allows us to monitor the machine
  * environment (temp, voltage, fan function) and the front panel and
@@ -678,7 +678,7 @@ int fpregs_soft_set(struct task_struct *target,
 		    unsigned int pos, unsigned int count,
 		    const void *kbuf, const void __user *ubuf)
 {
-	struct i387_soft_struct *s387 = &target->thread.i387.soft;
+	struct i387_soft_struct *s387 = &target->thread.xstate->soft;
 	void *space = s387->st_space;
 	int ret;
 	int offset, other, i, tags, regnr, tag, newtop;
@@ -730,7 +730,7 @@ int fpregs_soft_get(struct task_struct *target,
 		    unsigned int pos, unsigned int count,
 		    void *kbuf, void __user *ubuf)
 {
-	struct i387_soft_struct *s387 = &target->thread.i387.soft;
+	struct i387_soft_struct *s387 = &target->thread.xstate->soft;
 	const void *space = s387->st_space;
 	int ret;
 	int offset = (S387->ftop & 7) * 10, other = 80 - offset;
@@ -35,8 +35,8 @@
 #define SEG_EXPAND_DOWN(s)	(((s).b & ((1 << 11) | (1 << 10))) \
 				 == (1 << 10))
 
-#define I387			(current->thread.i387)
-#define FPU_info		(I387.soft.info)
+#define I387			(current->thread.xstate)
+#define FPU_info		(I387->soft.info)
 
 #define FPU_CS			(*(unsigned short *) &(FPU_info->___cs))
 #define FPU_SS			(*(unsigned short *) &(FPU_info->___ss))
@@ -46,25 +46,25 @@
 #define FPU_EIP			(FPU_info->___eip)
 #define FPU_ORIG_EIP		(FPU_info->___orig_eip)
 
-#define FPU_lookahead		(I387.soft.lookahead)
+#define FPU_lookahead		(I387->soft.lookahead)
 
 /* nz if ip_offset and cs_selector are not to be set for the current
    instruction. */
-#define no_ip_update		(*(u_char *)&(I387.soft.no_update))
-#define FPU_rm			(*(u_char *)&(I387.soft.rm))
+#define no_ip_update		(*(u_char *)&(I387->soft.no_update))
+#define FPU_rm			(*(u_char *)&(I387->soft.rm))
 
 /* Number of bytes of data which can be legally accessed by the current
    instruction. This only needs to hold a number <= 108, so a byte will do. */
-#define access_limit		(*(u_char *)&(I387.soft.alimit))
+#define access_limit		(*(u_char *)&(I387->soft.alimit))
 
-#define partial_status		(I387.soft.swd)
-#define control_word		(I387.soft.cwd)
-#define fpu_tag_word		(I387.soft.twd)
-#define registers		(I387.soft.st_space)
-#define top			(I387.soft.ftop)
+#define partial_status		(I387->soft.swd)
+#define control_word		(I387->soft.cwd)
+#define fpu_tag_word		(I387->soft.twd)
+#define registers		(I387->soft.st_space)
+#define top			(I387->soft.ftop)
 
-#define instruction_address	(*(struct address *)&I387.soft.fip)
-#define operand_address		(*(struct address *)&I387.soft.foo)
+#define instruction_address	(*(struct address *)&I387->soft.fip)
+#define operand_address		(*(struct address *)&I387->soft.foo)
 
 #define FPU_access_ok(x,y,z)	if ( !access_ok(x,y,z) ) \
 				math_abort(FPU_info,SIGSEGV)
@@ -1180,8 +1180,8 @@ u_char __user *fstenv(fpu_addr_modes addr_modes, u_char __user *d)
 	control_word |= 0xffff0040;
 	partial_status = status_word() | 0xffff0000;
 	fpu_tag_word |= 0xffff0000;
-	I387.soft.fcs &= ~0xf8000000;
-	I387.soft.fos |= 0xffff0000;
+	I387->soft.fcs &= ~0xf8000000;
+	I387->soft.fos |= 0xffff0000;
 #endif /* PECULIAR_486 */
 	if (__copy_to_user(d, &control_word, 7 * 4))
 		FPU_abort;
@@ -120,7 +120,7 @@ int __init get_memcfg_numa_flat(void)
 	printk("NUMA - single node, flat memory mode\n");
 
 	/* Run the memory configuration and find the top of memory. */
-	find_max_pfn();
+	propagate_e820_map();
 	node_start_pfn[0] = 0;
 	node_end_pfn[0] = max_pfn;
 	memory_present(0, 0, max_pfn);
@@ -134,7 +134,7 @@ int __init get_memcfg_numa_flat(void)
 /*
  * Find the highest page frame number we have available for the node
  */
-static void __init find_max_pfn_node(int nid)
+static void __init propagate_e820_map_node(int nid)
 {
 	if (node_end_pfn[nid] > max_pfn)
 		node_end_pfn[nid] = max_pfn;
@@ -379,7 +379,7 @@ unsigned long __init setup_memory(void)
 	printk("High memory starts at vaddr %08lx\n",
 			(ulong) pfn_to_kaddr(highstart_pfn));
 	for_each_online_node(nid)
-		find_max_pfn_node(nid);
+		propagate_e820_map_node(nid);
 
 	memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
 	NODE_DATA(0)->bdata = &node0_bdata;
@@ -1,5 +1,4 @@
 /*
- *  linux/arch/i386/mm/init.c
  *
  *  Copyright (C) 1995  Linus Torvalds
  *
@@ -47,9 +47,6 @@
 #include <asm/numa.h>
 #include <asm/cacheflush.h>
 
-const struct dma_mapping_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 static unsigned long dma_reserve __initdata;
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -134,7 +134,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 
 	if (!phys_addr_valid(phys_addr)) {
 		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
-		       phys_addr);
+		       (unsigned long long)phys_addr);
 		WARN_ON_ONCE(1);
 		return NULL;
 	}
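
The cast added above (and in the next hunk) exists because resource_size_t/phys_addr_t may be 32 or 64 bits depending on configuration, while %llx always expects an unsigned long long. A small standalone illustration; the typedef here is an assumption for the demo:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;	/* width varies by config in the kernel */

int main(void)
{
	phys_addr_t phys = 0xfee00000ULL;

	/* The explicit cast keeps the format string correct whether the
	 * underlying type is 32 or 64 bit. */
	printf("address %llx\n", (unsigned long long)phys);
	return 0;
}
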
|
@ -187,7 +187,8 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
|
||||||
new_prot_val == _PAGE_CACHE_WB)) {
|
new_prot_val == _PAGE_CACHE_WB)) {
|
||||||
pr_debug(
|
pr_debug(
|
||||||
"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
|
"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
|
||||||
phys_addr, phys_addr + size,
|
(unsigned long long)phys_addr,
|
||||||
|
(unsigned long long)(phys_addr + size),
|
||||||
prot_val, new_prot_val);
|
prot_val, new_prot_val);
|
||||||
free_memtype(phys_addr, phys_addr + size);
|
free_memtype(phys_addr, phys_addr + size);
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
|
@@ -164,7 +164,7 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 	if (!found)
 		return -1;
 
-	memnode_shift = compute_hash_shift(nodes, 8);
+	memnode_shift = compute_hash_shift(nodes, 8, NULL);
 	if (memnode_shift < 0) {
 		printk(KERN_ERR "No NUMA node hash function found. Contact maintainer\n");
 		return -1;
@@ -60,7 +60,7 @@ unsigned long __initdata nodemap_size;
  * -1 if node overlap or lost ram (shift too big)
  */
 static int __init populate_memnodemap(const struct bootnode *nodes,
-				      int numnodes, int shift)
+				      int numnodes, int shift, int *nodeids)
 {
 	unsigned long addr, end;
 	int i, res = -1;
|
@ -76,7 +76,12 @@ static int __init populate_memnodemap(const struct bootnode *nodes,
|
||||||
do {
|
do {
|
||||||
if (memnodemap[addr >> shift] != NUMA_NO_NODE)
|
if (memnodemap[addr >> shift] != NUMA_NO_NODE)
|
||||||
return -1;
|
return -1;
|
||||||
memnodemap[addr >> shift] = i;
|
|
||||||
|
if (!nodeids)
|
||||||
|
memnodemap[addr >> shift] = i;
|
||||||
|
else
|
||||||
|
memnodemap[addr >> shift] = nodeids[i];
|
||||||
|
|
||||||
addr += (1UL << shift);
|
addr += (1UL << shift);
|
||||||
} while (addr < end);
|
} while (addr < end);
|
||||||
res = 1;
|
res = 1;
|
||||||
|
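
populate_memnodemap() fills a direct-mapped table so that the later address-to-node lookup is a single shift and array access; the new nodeids argument lets the table carry a node id that differs from the block index. A toy sketch of the lookup side, with illustrative names and sizes:

#include <stdio.h>

#define MAP_SIZE 64
static int memnodemap[MAP_SIZE];
static int memnode_shift = 20;	/* 1 MiB granularity in this toy setup */

/* One shift, one load: the whole point of the table. */
static int phys_to_nid(unsigned long addr)
{
	return memnodemap[addr >> memnode_shift];
}

int main(void)
{
	memnodemap[3] = 1;			/* mark the fourth chunk as node 1 */
	printf("%d\n", phys_to_nid(3UL << 20));	/* prints 1 */
	return 0;
}
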
@@ -139,7 +144,8 @@ static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
 	return i;
 }
 
-int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
+int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
+			      int *nodeids)
 {
 	int shift;
 
|
@ -149,7 +155,7 @@ int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
|
||||||
printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
|
printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
|
||||||
shift);
|
shift);
|
||||||
|
|
||||||
if (populate_memnodemap(nodes, numnodes, shift) != 1) {
|
if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
|
||||||
printk(KERN_INFO "Your memory is not aligned you need to "
|
printk(KERN_INFO "Your memory is not aligned you need to "
|
||||||
"rebuild your kernel with a bigger NODEMAPSIZE "
|
"rebuild your kernel with a bigger NODEMAPSIZE "
|
||||||
"shift=%d\n", shift);
|
"shift=%d\n", shift);
|
||||||
|
@@ -462,7 +468,7 @@ done:
 		}
 	}
 out:
-	memnode_shift = compute_hash_shift(nodes, num_nodes);
+	memnode_shift = compute_hash_shift(nodes, num_nodes, NULL);
 	if (memnode_shift < 0) {
 		memnode_shift = 0;
 		printk(KERN_ERR "No NUMA hash function found. NUMA emulation "
@@ -1,7 +1,3 @@
-/*
- *  linux/arch/i386/mm/pgtable.c
- */
-
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -32,6 +32,10 @@ static struct bootnode nodes_add[MAX_NUMNODES];
 static int found_add_area __initdata;
 int hotadd_percent __initdata = 0;
 
+static int num_node_memblks __initdata;
+static struct bootnode node_memblk_range[NR_NODE_MEMBLKS] __initdata;
+static int memblk_nodeid[NR_NODE_MEMBLKS] __initdata;
+
 /* Too small nodes confuse the VM badly. Usually they result
    from BIOS bugs. */
 #define NODE_MIN_SIZE (4*1024*1024)
@@ -41,17 +45,17 @@ static __init int setup_node(int pxm)
 	return acpi_map_pxm_to_node(pxm);
 }
 
-static __init int conflicting_nodes(unsigned long start, unsigned long end)
+static __init int conflicting_memblks(unsigned long start, unsigned long end)
 {
 	int i;
-	for_each_node_mask(i, nodes_parsed) {
-		struct bootnode *nd = &nodes[i];
+	for (i = 0; i < num_node_memblks; i++) {
+		struct bootnode *nd = &node_memblk_range[i];
 		if (nd->start == nd->end)
 			continue;
 		if (nd->end > start && nd->start < end)
-			return i;
+			return memblk_nodeid[i];
 		if (nd->end == end && nd->start == start)
-			return i;
+			return memblk_nodeid[i];
 	}
 	return -1;
 }
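
The test "nd->end > start && nd->start < end" above is the standard overlap check for half-open ranges: two intervals intersect exactly when each one starts before the other ends. A self-contained sketch:

#include <stdio.h>

static int ranges_overlap(unsigned long a_start, unsigned long a_end,
			  unsigned long b_start, unsigned long b_end)
{
	/* [a_start, a_end) and [b_start, b_end) intersect iff each
	 * starts strictly before the other ends. */
	return a_end > b_start && a_start < b_end;
}

int main(void)
{
	printf("%d\n", ranges_overlap(0, 10, 5, 15));	/* 1: overlap */
	printf("%d\n", ranges_overlap(0, 10, 10, 20));	/* 0: merely adjacent */
	return 0;
}
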
|
@ -258,7 +262,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
|
||||||
bad_srat();
|
bad_srat();
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
i = conflicting_nodes(start, end);
|
i = conflicting_memblks(start, end);
|
||||||
if (i == node) {
|
if (i == node) {
|
||||||
printk(KERN_WARNING
|
printk(KERN_WARNING
|
||||||
"SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
|
"SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
|
||||||
|
@@ -283,10 +287,10 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 		nd->end = end;
 	}
 
-	printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
-	       nd->start, nd->end);
-	e820_register_active_regions(node, nd->start >> PAGE_SHIFT,
-				     nd->end >> PAGE_SHIFT);
+	printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm,
+	       start, end);
+	e820_register_active_regions(node, start >> PAGE_SHIFT,
+				     end >> PAGE_SHIFT);
 	push_node_boundaries(node, nd->start >> PAGE_SHIFT,
 			     nd->end >> PAGE_SHIFT);
 
@@ -298,6 +302,11 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 		if ((nd->start | nd->end) == 0)
 			node_clear(node, nodes_parsed);
 	}
+
+	node_memblk_range[num_node_memblks].start = start;
+	node_memblk_range[num_node_memblks].end = end;
+	memblk_nodeid[num_node_memblks] = node;
+	num_node_memblks++;
 }
 
 /* Sanity check to catch more bad SRATs (they are amazingly common).
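
The lines added above append each accepted SRAT range to two parallel arrays, one holding the range and one holding the owning node, so multiple blocks that belong to the same node stay distinguishable for the later hash computation. A compact sketch of that record-keeping pattern, with invented names and sizes:

#include <stdio.h>

struct range { unsigned long start, end; };

#define MAX_BLKS 32
static struct range blk_range[MAX_BLKS];
static int blk_nodeid[MAX_BLKS];
static int num_blks;

/* Parallel arrays: index i describes one memory block and its node. */
static void record_memblk(unsigned long start, unsigned long end, int node)
{
	blk_range[num_blks].start = start;
	blk_range[num_blks].end = end;
	blk_nodeid[num_blks] = node;
	num_blks++;
}

int main(void)
{
	record_memblk(0, 1UL << 20, 0);
	printf("%d block(s), first on node %d\n", num_blks, blk_nodeid[0]);
	return 0;
}
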
|
@ -368,7 +377,8 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
memnode_shift = compute_hash_shift(nodes, MAX_NUMNODES);
|
memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
|
||||||
|
memblk_nodeid);
|
||||||
if (memnode_shift < 0) {
|
if (memnode_shift < 0) {
|
||||||
printk(KERN_ERR
|
printk(KERN_ERR
|
||||||
"SRAT: No NUMA node hash function found. Contact maintainer\n");
|
"SRAT: No NUMA node hash function found. Contact maintainer\n");
|
||||||
|
|
|
@@ -37,7 +37,8 @@ $(obj)/%.so: OBJCOPYFLAGS := -S
 $(obj)/%.so: $(obj)/%.so.dbg FORCE
 	$(call if_changed,objcopy)
 
-CFL := $(PROFILING) -mcmodel=small -fPIC -g0 -O2 -fasynchronous-unwind-tables -m64
+CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
+       $(filter -g%,$(KBUILD_CFLAGS))
 
 $(vobjs): KBUILD_CFLAGS += $(CFL)
 
@@ -1,5 +1,4 @@
 /*
- * arch/i386/video/fbdev.c - i386 Framebuffer
  *
  * Copyright (C) 2007 Antonino Daplas <adaplas@gmail.com>
  *
@@ -17,4 +17,12 @@
 			+ (CONFIG_PHYSICAL_ALIGN - 1)) \
 				& ~(CONFIG_PHYSICAL_ALIGN - 1))
 
+#ifdef CONFIG_X86_64
+#define BOOT_HEAP_SIZE	0x7000
+#define BOOT_STACK_SIZE	0x4000
+#else
+#define BOOT_HEAP_SIZE	0x4000
+#define BOOT_STACK_SIZE	0x1000
+#endif
+
 #endif /* _ASM_BOOT_H */
@@ -1,5 +1,237 @@
+#ifndef _ASM_DMA_MAPPING_H_
+#define _ASM_DMA_MAPPING_H_
+
+/*
+ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
+ * documentation.
+ */
+
+#include <linux/scatterlist.h>
+#include <asm/io.h>
+#include <asm/swiotlb.h>
+
+extern dma_addr_t bad_dma_address;
+extern int iommu_merge;
+extern struct device fallback_dev;
+extern int panic_on_overflow;
+extern int forbid_dac;
+extern int force_iommu;
+
+struct dma_mapping_ops {
+	int             (*mapping_error)(dma_addr_t dma_addr);
+	void*           (*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t gfp);
+	void            (*free_coherent)(struct device *dev, size_t size,
+				void *vaddr, dma_addr_t dma_handle);
+	dma_addr_t      (*map_single)(struct device *hwdev, phys_addr_t ptr,
+				size_t size, int direction);
+	/* like map_single, but doesn't check the device mask */
+	dma_addr_t      (*map_simple)(struct device *hwdev, phys_addr_t ptr,
+				size_t size, int direction);
+	void            (*unmap_single)(struct device *dev, dma_addr_t addr,
+				size_t size, int direction);
+	void            (*sync_single_for_cpu)(struct device *hwdev,
+				dma_addr_t dma_handle, size_t size,
+				int direction);
+	void            (*sync_single_for_device)(struct device *hwdev,
+				dma_addr_t dma_handle, size_t size,
+				int direction);
+	void            (*sync_single_range_for_cpu)(struct device *hwdev,
+				dma_addr_t dma_handle, unsigned long offset,
+				size_t size, int direction);
+	void            (*sync_single_range_for_device)(struct device *hwdev,
+				dma_addr_t dma_handle, unsigned long offset,
+				size_t size, int direction);
+	void            (*sync_sg_for_cpu)(struct device *hwdev,
+				struct scatterlist *sg, int nelems,
+				int direction);
+	void            (*sync_sg_for_device)(struct device *hwdev,
+				struct scatterlist *sg, int nelems,
+				int direction);
+	int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
+				int nents, int direction);
+	void            (*unmap_sg)(struct device *hwdev,
+				struct scatterlist *sg, int nents,
+				int direction);
+	int             (*dma_supported)(struct device *hwdev, u64 mask);
+	int		is_phys;
+};
+
+extern const struct dma_mapping_ops *dma_ops;
+
+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+	if (dma_ops->mapping_error)
+		return dma_ops->mapping_error(dma_addr);
+
+	return (dma_addr == bad_dma_address);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			   dma_addr_t *dma_handle, gfp_t flag);
+
+void dma_free_coherent(struct device *dev, size_t size,
+			 void *vaddr, dma_addr_t dma_handle);
+
+
+extern int dma_supported(struct device *hwdev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 mask);
+
+static inline dma_addr_t
+dma_map_single(struct device *hwdev, void *ptr, size_t size,
+	       int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
+		 int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	if (dma_ops->unmap_single)
+		dma_ops->unmap_single(dev, addr, size, direction);
+}
+
+static inline int
+dma_map_sg(struct device *hwdev, struct scatterlist *sg,
+	   int nents, int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	return dma_ops->map_sg(hwdev, sg, nents, direction);
+}
+
+static inline void
+dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
+	     int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	if (dma_ops->unmap_sg)
+		dma_ops->unmap_sg(hwdev, sg, nents, direction);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
+			size_t size, int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	if (dma_ops->sync_single_for_cpu)
+		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
+					     direction);
+	flush_write_buffers();
+}
+
+static inline void
+dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
+			   size_t size, int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	if (dma_ops->sync_single_for_device)
+		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
+						direction);
+	flush_write_buffers();
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
+			      unsigned long offset, size_t size, int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	if (dma_ops->sync_single_range_for_cpu)
+		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
+						   size, direction);
+
+	flush_write_buffers();
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
+				 unsigned long offset, size_t size,
+				 int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	if (dma_ops->sync_single_range_for_device)
+		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
+						      offset, size, direction);
+
+	flush_write_buffers();
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+		    int nelems, int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	if (dma_ops->sync_sg_for_cpu)
+		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+	flush_write_buffers();
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+		       int nelems, int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	if (dma_ops->sync_sg_for_device)
+		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+
+	flush_write_buffers();
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      size_t offset, size_t size,
+				      int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	return dma_ops->map_single(dev, page_to_phys(page)+offset,
+				   size, direction);
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+				  size_t size, int direction)
+{
+	dma_unmap_single(dev, addr, size, direction);
+}
+
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	flush_write_buffers();
+}
+
+static inline int dma_get_cache_alignment(void)
+{
+	/* no easy way to get cache size on all x86, so return the
+	 * maximum possible, to be safe */
+	return boot_cpu_data.x86_clflush_size;
+}
+
+#define dma_is_consistent(d, h)	(1)
+
 #ifdef CONFIG_X86_32
-# include "dma-mapping_32.h"
-#else
-# include "dma-mapping_64.h"
+# define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+struct dma_coherent_mem {
+	void		*virt_base;
+	u32		device_base;
+	int		size;
+	int		flags;
+	unsigned long	*bitmap;
+};
+
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+			    dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
+
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+				  dma_addr_t device_addr, size_t size);
+#endif /* CONFIG_X86_32 */
 #endif
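
The unified header above keeps the 64-bit design: thin static inlines that dispatch through a pluggable dma_mapping_ops table, with optional hooks guarded by NULL checks so simple backends only implement what they need. A hedged userspace sketch of that dispatch pattern (none of these names are the kernel API):

#include <stdio.h>
#include <stddef.h>

typedef unsigned long addr_t;
#define BAD_ADDR ((addr_t)-1)

struct mapping_ops {
	addr_t (*map_single)(void *ptr, size_t size);	/* mandatory */
	void (*unmap_single)(addr_t addr, size_t size);	/* optional  */
};

/* Trivial "nommu" backend: the address is the pointer itself. */
static addr_t nommu_map(void *ptr, size_t size) { (void)size; return (addr_t)ptr; }

static const struct mapping_ops nommu_ops = { .map_single = nommu_map };
static const struct mapping_ops *ops = &nommu_ops;

static addr_t map_single(void *ptr, size_t size)
{
	return ops->map_single(ptr, size);	/* always present */
}

static void unmap_single(addr_t addr, size_t size)
{
	if (ops->unmap_single)			/* hook may be absent */
		ops->unmap_single(addr, size);
}

int main(void)
{
	char buf[64];
	addr_t a = map_single(buf, sizeof(buf));

	printf("%s\n", a == BAD_ADDR ? "mapping failed" : "mapped");
	unmap_single(a, sizeof(buf));
	return 0;
}
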
@@ -1,187 +0,0 @@
-#ifndef _ASM_I386_DMA_MAPPING_H
-#define _ASM_I386_DMA_MAPPING_H
-
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/bug.h>
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-			 void *vaddr, dma_addr_t dma_handle);
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	WARN_ON(size == 0);
-	flush_write_buffers();
-	return virt_to_phys(ptr);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
-	   enum dma_data_direction direction)
-{
-	struct scatterlist *sg;
-	int i;
-
-	BUG_ON(!valid_dma_direction(direction));
-	WARN_ON(nents == 0 || sglist[0].length == 0);
-
-	for_each_sg(sglist, sg, nents, i) {
-		BUG_ON(!sg_page(sg));
-
-		sg->dma_address = sg_phys(sg);
-	}
-
-	flush_write_buffers();
-	return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-	     size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	return page_to_phys(page) + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-}
-
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-			   enum dma_data_direction direction)
-{
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction direction)
-{
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		    enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		       enum dma_data_direction direction)
-{
-	flush_write_buffers();
-}
-
-static inline int
-dma_mapping_error(dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-extern int forbid_dac;
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	/*
-	 * we fall back to GFP_DMA when the mask isn't all 1s,
-	 * so we can't guarantee allocations that must be
-	 * within a tighter range than GFP_DMA..
-	 */
-	if(mask < 0x00ffffff)
-		return 0;
-
-	/* Work around chipset bugs */
-	if (forbid_dac > 0 && mask > 0xffffffffULL)
-		return 0;
-
-	return 1;
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-	if(!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = mask;
-
-	return 0;
-}
-
-static inline int
-dma_get_cache_alignment(void)
-{
-	/* no easy way to get cache size on all x86, so return the
-	 * maximum possible, to be safe */
-	return (1 << INTERNODE_CACHE_SHIFT);
-}
-
-#define dma_is_consistent(d, h)	(1)
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-	flush_write_buffers();
-}
-
-#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-extern int
-dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-			    dma_addr_t device_addr, size_t size, int flags);
-
-extern void
-dma_release_declared_memory(struct device *dev);
-
-extern void *
-dma_mark_declared_memory_occupied(struct device *dev,
-				  dma_addr_t device_addr, size_t size);
-
-#endif
@@ -1,202 +0,0 @@
-#ifndef _X8664_DMA_MAPPING_H
-#define _X8664_DMA_MAPPING_H 1
-
-/*
- * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
- * documentation.
- */
-
-#include <linux/scatterlist.h>
-#include <asm/io.h>
-#include <asm/swiotlb.h>
-
-struct dma_mapping_ops {
-	int             (*mapping_error)(dma_addr_t dma_addr);
-	void*           (*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t gfp);
-	void            (*free_coherent)(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t      (*map_single)(struct device *hwdev, void *ptr,
-				size_t size, int direction);
-	/* like map_single, but doesn't check the device mask */
-	dma_addr_t      (*map_simple)(struct device *hwdev, char *ptr,
-				size_t size, int direction);
-	void            (*unmap_single)(struct device *dev, dma_addr_t addr,
-				size_t size, int direction);
-	void            (*sync_single_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, size_t size,
-				int direction);
-	void            (*sync_single_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, size_t size,
-				int direction);
-	void            (*sync_single_range_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size, int direction);
-	void            (*sync_single_range_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size, int direction);
-	void            (*sync_sg_for_cpu)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	void            (*sync_sg_for_device)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
-				int nents, int direction);
-	void            (*unmap_sg)(struct device *hwdev,
-				struct scatterlist *sg, int nents,
-				int direction);
-	int             (*dma_supported)(struct device *hwdev, u64 mask);
-	int		is_phys;
-};
-
-extern dma_addr_t bad_dma_address;
-extern const struct dma_mapping_ops* dma_ops;
-extern int iommu_merge;
-
-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
-	if (dma_ops->mapping_error)
-		return dma_ops->mapping_error(dma_addr);
-
-	return (dma_addr == bad_dma_address);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t gfp);
-extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-			      dma_addr_t dma_handle);
-
-static inline dma_addr_t
-dma_map_single(struct device *hwdev, void *ptr, size_t size,
-	       int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_single(hwdev, ptr, size, direction);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
-		 int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	dma_ops->unmap_single(dev, addr, size, direction);
-}
-
-#define dma_map_page(dev,page,offset,size,dir) \
-	dma_map_single((dev), page_address(page)+(offset), (size), (dir))
-
-#define dma_unmap_page dma_unmap_single
-
-static inline void
-dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			size_t size, int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_for_cpu)
-		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
-					     direction);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
-			   size_t size, int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_for_device)
-		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
-						direction);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size, int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_range_for_cpu) {
-		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
-	}
-
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size, int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_range_for_device)
-		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
-						      offset, size, direction);
-
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-		    int nelems, int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_cpu)
-		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-		       int nelems, int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_device) {
-		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
-	}
-
-	flush_write_buffers();
-}
-
-static inline int
-dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_sg(hwdev, sg, nents, direction);
-}
-
-static inline void
-dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-	     int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	dma_ops->unmap_sg(hwdev, sg, nents, direction);
-}
-
-extern int dma_supported(struct device *hwdev, u64 mask);
-
-/* same for gart, swiotlb, and nommu */
-static inline int dma_get_cache_alignment(void)
-{
-	return boot_cpu_data.x86_clflush_size;
-}
-
-#define dma_is_consistent(d, h) 1
-
-extern int dma_set_mask(struct device *dev, u64 mask);
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	flush_write_buffers();
-}
-
-extern struct device fallback_dev;
-extern int panic_on_overflow;
-
-#endif /* _X8664_DMA_MAPPING_H */
@@ -24,7 +24,7 @@ extern void update_e820(void);
 extern int e820_all_mapped(unsigned long start, unsigned long end,
 			   unsigned type);
 extern int e820_any_mapped(u64 start, u64 end, unsigned type);
-extern void find_max_pfn(void);
+extern void propagate_e820_map(void);
 extern void register_bootmem_low_pages(unsigned long max_low_pfn);
 extern void add_memory_region(unsigned long long start,
 			      unsigned long long size, int type);
@@ -117,6 +117,7 @@ extern struct genapic *genapic;
 enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
 #define get_uv_system_type()		UV_NONE
 #define is_uv_system()			0
+#define uv_wakeup_secondary(a, b)	1
 
 
 #endif
@@ -21,8 +21,9 @@
 
 extern void fpu_init(void);
 extern void mxcsr_feature_mask_init(void);
-extern void init_fpu(struct task_struct *child);
+extern int init_fpu(struct task_struct *child);
 extern asmlinkage void math_state_restore(void);
+extern void init_thread_xstate(void);
 
 extern user_regset_active_fn fpregs_active, xfpregs_active;
 extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get;
@@ -117,24 +118,22 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
 	   starting with gas 2.16. */
 	__asm__ __volatile__("fxsaveq %0"
-			     : "=m" (tsk->thread.i387.fxsave));
+			     : "=m" (tsk->thread.xstate->fxsave));
 #elif 0
 	/* Using, as a workaround, the properly prefixed form below isn't
 	   accepted by any binutils version so far released, complaining that
 	   the same type of prefix is used twice if an extended register is
 	   needed for addressing (fix submitted to mainline 2005-11-21). */
 	__asm__ __volatile__("rex64/fxsave %0"
-			     : "=m" (tsk->thread.i387.fxsave));
+			     : "=m" (tsk->thread.xstate->fxsave));
 #else
 	/* This, however, we can work around by forcing the compiler to select
 	   an addressing mode that doesn't require extended registers. */
-	__asm__ __volatile__("rex64/fxsave %P2(%1)"
-			     : "=m" (tsk->thread.i387.fxsave)
-			     : "cdaSDb" (tsk),
-			       "i" (offsetof(__typeof__(*tsk),
-					     thread.i387.fxsave)));
+	__asm__ __volatile__("rex64/fxsave (%1)"
+			     : "=m" (tsk->thread.xstate->fxsave)
+			     : "cdaSDb" (&tsk->thread.xstate->fxsave));
 #endif
-	clear_fpu_state(&tsk->thread.i387.fxsave);
+	clear_fpu_state(&tsk->thread.xstate->fxsave);
 	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
@@ -148,7 +147,7 @@ static inline int save_i387(struct _fpstate __user *buf)
 	int err = 0;
 
 	BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
-			sizeof(tsk->thread.i387.fxsave));
+			sizeof(tsk->thread.xstate->fxsave));
 
 	if ((unsigned long)buf % 16)
 		printk("save_i387: bad fpstate %p\n", buf);
@@ -164,7 +163,7 @@ static inline int save_i387(struct _fpstate __user *buf)
 		task_thread_info(tsk)->status &= ~TS_USEDFPU;
 		stts();
 	} else {
-		if (__copy_to_user(buf, &tsk->thread.i387.fxsave,
+		if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
 				   sizeof(struct i387_fxsave_struct)))
 			return -1;
 	}
@@ -201,7 +200,7 @@ static inline void restore_fpu(struct task_struct *tsk)
 		"nop ; frstor %1",
 		"fxrstor %1",
 		X86_FEATURE_FXSR,
-		"m" ((tsk)->thread.i387.fxsave));
+		"m" (tsk->thread.xstate->fxsave));
 }
 
 /* We need a safe address that is cheap to find and that is already
@@ -225,8 +224,8 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 		"fxsave %[fx]\n"
 		"bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
 		X86_FEATURE_FXSR,
-		[fx] "m" (tsk->thread.i387.fxsave),
-		[fsw] "m" (tsk->thread.i387.fxsave.swd) : "memory");
+		[fx] "m" (tsk->thread.xstate->fxsave),
+		[fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
 	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
 	   is pending.  Clear the x87 state here by setting it to fixed
 	   values. safe_address is a random variable that should be in L1 */
@@ -327,25 +326,25 @@ static inline void clear_fpu(struct task_struct *tsk)
 static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
 {
 	if (cpu_has_fxsr) {
-		return tsk->thread.i387.fxsave.cwd;
+		return tsk->thread.xstate->fxsave.cwd;
 	} else {
-		return (unsigned short)tsk->thread.i387.fsave.cwd;
+		return (unsigned short)tsk->thread.xstate->fsave.cwd;
 	}
 }
 
 static inline unsigned short get_fpu_swd(struct task_struct *tsk)
 {
 	if (cpu_has_fxsr) {
-		return tsk->thread.i387.fxsave.swd;
+		return tsk->thread.xstate->fxsave.swd;
 	} else {
-		return (unsigned short)tsk->thread.i387.fsave.swd;
+		return (unsigned short)tsk->thread.xstate->fsave.swd;
 	}
 }
 
 static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
 {
 	if (cpu_has_xmm) {
-		return tsk->thread.i387.fxsave.mxcsr;
+		return tsk->thread.xstate->fxsave.mxcsr;
 	} else {
 		return MXCSR_DEFAULT;
 	}
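These accessors exist mainly for the #MF (math fault) handler, which masks the status word against the control word to find the exception that actually fired. A self-contained sketch of that mapping, condensed from the traps code of this era; the function name and framing are mine:

#include <signal.h>

/* Map pending, unmasked x87 exceptions (swd & ~cwd & 0x3f) to the
 * siginfo codes the #MF handler raises. */
static int fpu_exception_code(unsigned short cwd, unsigned short swd)
{
	switch (swd & ~cwd & 0x3f) {
	case 0x001: return FPE_FLTINV;	/* invalid operation */
	case 0x002:			/* denormal operand */
	case 0x010: return FPE_FLTUND;	/* underflow */
	case 0x004: return FPE_FLTDIV;	/* divide by zero */
	case 0x008: return FPE_FLTOVF;	/* overflow */
	case 0x020: return FPE_FLTRES;	/* inexact result */
	default:    return 0;		/* nothing unmasked pending */
	}
}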
@@ -9,7 +9,8 @@ struct bootnode {
 	u64 end;
 };
 
-extern int compute_hash_shift(struct bootnode *nodes, int numnodes);
+extern int compute_hash_shift(struct bootnode *nodes, int numblks,
+			      int *nodeids);
 
 #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
 
@@ -22,6 +22,7 @@ extern int (*pci_config_read)(int seg, int bus, int dev, int fn,
 extern int (*pci_config_write)(int seg, int bus, int dev, int fn,
 			       int reg, int len, u32 value);
 
+extern void dma32_reserve_bootmem(void);
 extern void pci_iommu_alloc(void);
 
 /* The PCI address space does equal the physical memory
@@ -354,7 +354,7 @@ struct i387_soft_struct {
 	u32			entry_eip;
 };
 
-union i387_union {
+union thread_xstate {
 	struct i387_fsave_struct	fsave;
 	struct i387_fxsave_struct	fxsave;
 	struct i387_soft_struct		soft;
@@ -365,6 +365,9 @@ DECLARE_PER_CPU(struct orig_ist, orig_ist);
 #endif
 
 extern void print_cpu_info(struct cpuinfo_x86 *);
+extern unsigned int xstate_size;
+extern void free_thread_xstate(struct task_struct *);
+extern struct kmem_cache *task_xstate_cachep;
 extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern unsigned short num_cache_leaves;
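xstate_size and task_xstate_cachep support the new layout: FPU state is no longer embedded in thread_struct but carved from a dedicated slab, sized at boot for the features the CPU actually has. A sketch of how that cache is set up, mirroring this series' process code as far as I can tell:

struct kmem_cache *task_xstate_cachep;
unsigned int xstate_size;

void arch_task_cache_init(void)
{
	/* One slab object per task that has touched the FPU, sized to
	 * the runtime-detected extended state, aligned as the union
	 * demands (fxsave needs 16 bytes). */
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC, NULL);
}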
@@ -397,8 +400,8 @@ struct thread_struct {
 	unsigned long	cr2;
 	unsigned long	trap_no;
 	unsigned long	error_code;
-	/* Floating point info: */
-	union i387_union	i387 __attribute__((aligned(16)));;
+	/* floating point and extended processor state */
+	union thread_xstate	*xstate;
 #ifdef CONFIG_X86_32
 	/* Virtual 86 mode info */
 	struct vm86_struct __user *vm86_info;
@@ -918,4 +921,11 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
 
 #define KSTK_EIP(task)		(task_pt_regs(task)->ip)
 
+/* Get/set a process' ability to use the timestamp counter instruction */
+#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
+#define SET_TSC_CTL(val)	set_tsc_mode((val))
+
+extern int get_tsc_mode(unsigned long adr);
+extern int set_tsc_mode(unsigned int val);
+
 #endif
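GET_TSC_CTL/SET_TSC_CTL back the new PR_GET_TSC/PR_SET_TSC prctl pair. A minimal user-space probe, assuming a kernel with this series applied and a <linux/prctl.h> that defines the new constants:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	int tsc_mode;

	/* Ask whether rdtsc is currently permitted for this process. */
	if (prctl(PR_GET_TSC, &tsc_mode) < 0) {
		perror("prctl(PR_GET_TSC)");
		return 1;
	}
	printf("rdtsc is %s\n", tsc_mode == PR_TSC_ENABLE ?
	       "allowed" : "trapped (SIGSEGV)");

	/* Make any further rdtsc in this process raise SIGSEGV. */
	if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV) < 0) {
		perror("prctl(PR_SET_TSC)");
		return 1;
	}
	return 0;
}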
@@ -11,9 +11,7 @@ struct scatterlist {
 	unsigned int	offset;
 	unsigned int	length;
 	dma_addr_t	dma_address;
-#ifdef CONFIG_X86_64
 	unsigned int	dma_length;
-#endif
 };
 
 #define ARCH_HAS_SG_CHAIN
@@ -1,5 +1,14 @@
+#ifndef _ASM_X86_THREAD_INFO_H
 #ifdef CONFIG_X86_32
 # include "thread_info_32.h"
 #else
 # include "thread_info_64.h"
 #endif
+
+#ifndef __ASSEMBLY__
+extern void arch_task_cache_init(void);
+extern void free_thread_info(struct thread_info *ti);
+extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+#define arch_task_cache_init arch_task_cache_init
+#endif
+#endif /* _ASM_X86_THREAD_INFO_H */
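With the FPU state behind a pointer, fork can no longer rely on the plain structure copy of task_struct: arch_dup_task_struct gives the architecture a chance to duplicate the out-of-line xstate for the child. A sketch consistent with this series, with error handling pared down:

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;	/* shallow copy copies the xstate pointer too */
	if (src->thread.xstate) {
		/* give the child its own slab object ... */
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		/* ... carrying a snapshot of the parent's FPU state */
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}
	return 0;
}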
@@ -102,8 +102,6 @@ static inline struct thread_info *current_thread_info(void)
 	__get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE)))
 #endif
 
-#define free_thread_info(info)	free_pages((unsigned long)(info), get_order(THREAD_SIZE))
-
 #else /* !__ASSEMBLY__ */
 
 /* how to get the thread information struct from ASM */
@@ -85,8 +85,6 @@ static inline struct thread_info *stack_thread_info(void)
 #define alloc_thread_info(tsk) \
 	((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
 
-#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
-
 #else /* !__ASSEMBLY__ */
 
 /* how to get the thread information struct from ASM */
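Both per-arch free_thread_info macros go away because tearing down a thread now has an extra step: the xstate slab object must be returned as well. The common replacement plausibly looks like this (a sketch; in this series the real functions live in arch/x86/kernel/process.c):

void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
}

void free_thread_info(struct thread_info *ti)
{
	/* release the lazily allocated FPU state first ... */
	free_thread_xstate(ti->task);
	/* ... then the kernel stack pages themselves */
	free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}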
@@ -126,6 +124,7 @@ static inline struct thread_info *stack_thread_info(void)
 #define TIF_DEBUGCTLMSR		25	/* uses thread_struct.debugctlmsr */
 #define TIF_DS_AREA_MSR		26	/* uses thread_struct.ds_area_msr */
 #define TIF_BTS_TRACE_TS	27	/* record scheduling event timestamps */
+#define TIF_NOTSC		28	/* TSC is not accessible in userland */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
@@ -147,6 +146,7 @@ static inline struct thread_info *stack_thread_info(void)
 #define _TIF_DEBUGCTLMSR	(1 << TIF_DEBUGCTLMSR)
 #define _TIF_DS_AREA_MSR	(1 << TIF_DS_AREA_MSR)
 #define _TIF_BTS_TRACE_TS	(1 << TIF_BTS_TRACE_TS)
+#define _TIF_NOTSC		(1 << TIF_NOTSC)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK \
@@ -160,7 +160,7 @@ static inline struct thread_info *stack_thread_info(void)
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW \
-	(_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS)
+	(_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS|_TIF_NOTSC)
 #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
 
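Adding _TIF_NOTSC to _TIF_WORK_CTXSW routes TSC-restricted tasks through the slow-path context-switch hook, where CR4.TSD is flipped only when the flag actually differs between the outgoing and incoming task. A condensed sketch of that logic in __switch_to_xtra (the comparison is real to this series; the inlined cr4 updates stand in for its small helper functions):

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* Set CR4.TSD so rdtsc faults in user mode for the
		 * incoming task, or clear it to allow rdtsc again. */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			write_cr4(read_cr4() | X86_CR4_TSD);
		else
			write_cr4(read_cr4() & ~X86_CR4_TSD);
	}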
Some files were not shown because too many files have changed in this diff.