Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
commit 2c1179afc3

11 changed files with 133 additions and 28 deletions
arch/x86/Kconfig

@@ -754,11 +754,11 @@ config IOMMU_API
 	def_bool (AMD_IOMMU || DMAR)
 
 config MAXSMP
-	bool "Configure Maximum number of SMP Processors and NUMA Nodes"
+	bool "Enable Maximum number of SMP Processors and NUMA Nodes"
 	depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
 	select CPUMASK_OFFSTACK
 	---help---
-	  Configure maximum number of CPUS and NUMA Nodes for this architecture.
+	  Enable maximum number of CPUS and NUMA Nodes for this architecture.
 	  If unsure, say N.
 
 config NR_CPUS

drivers/mtd/nand/nand_base.c

@@ -2866,6 +2866,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
 	 */
 	if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
 			id_data[0] == NAND_MFR_SAMSUNG &&
+			(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
 			id_data[5] != 0x00) {
 		/* Calc pagesize */
 		mtd->writesize = 2048 << (extid & 0x03);

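For the Samsung extended-ID scheme matched here, the page size is decoded from the low bits of the extended ID byte. A standalone sketch of that arithmetic (the decode_pagesize name and the loop are illustrative; only the shift expression comes from the hunk above):

#include <stdio.h>

/* Mirrors "mtd->writesize = 2048 << (extid & 0x03);" from the hunk above. */
static unsigned decode_pagesize(unsigned extid)
{
	return 2048u << (extid & 0x03);
}

int main(void)
{
	/* extid & 0x03: 0 -> 2048, 1 -> 4096, 2 -> 8192, 3 -> 16384 bytes */
	for (unsigned extid = 0; extid < 4; extid++)
		printf("extid low bits %u -> page size %u\n",
		       extid, decode_pagesize(extid));
	return 0;
}
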
@@ -2934,14 +2935,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
 		chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1;
 
 	/* Set the bad block position */
-	if (!(busw & NAND_BUSWIDTH_16) && (*maf_id == NAND_MFR_STMICRO ||
-				(*maf_id == NAND_MFR_SAMSUNG &&
-				 mtd->writesize == 512) ||
-				*maf_id == NAND_MFR_AMD))
-		chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
-	else
+	if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
 		chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
+	else
+		chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
 
 	/* Get chip options, preserve non chip based options */
 	chip->options &= ~NAND_CHIPOPTIONS_MSK;

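A note on the two constants, stated as an assumption since the header is not part of this diff:

/* Assumed values (include/linux/mtd/nand.h of this era, quoted from
 * memory): large-page devices keep the factory bad-block marker at
 * OOB byte 0, small-page (512-byte) devices at OOB byte 5. */
#define NAND_LARGE_BADBLOCK_POS	0
#define NAND_SMALL_BADBLOCK_POS	5

If those values hold, page size plus bus width alone is enough to pick the marker position, which is why the per-manufacturer special-casing can be dropped.
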
drivers/mtd/nand/pxa3xx_nand.c

@@ -363,7 +363,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = {
 #define tAR_NDTR1(r)	(((r) >> 0) & 0xf)
 
 /* convert nano-seconds to nand flash controller clock cycles */
-#define ns2cycle(ns, clk)	(int)(((ns) * (clk / 1000000) / 1000) - 1)
+#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
 
 /* convert nand flash controller clock cycles to nano-seconds */
 #define cycle2ns(c, clk)	((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))

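The dropped "- 1" matters at the margins: integer division already truncates, so subtracting one more cycle could program a timing shorter than the chip's minimum. A quick standalone check (the 156 MHz clock and the tCH timing are illustrative values, not from the driver):

#include <stdio.h>

#define ns2cycle_old(ns, clk)	(int)(((ns) * (clk / 1000000) / 1000) - 1)
#define ns2cycle_new(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)

int main(void)
{
	long clk = 156000000;	/* illustrative 156 MHz controller clock */

	/* e.g. a 10 ns hold time: 10 * 156 / 1000 = 1 cycle; the old
	 * macro yielded 0, i.e. no hold time at all. */
	printf("10 ns: old=%d new=%d\n", ns2cycle_old(10, clk), ns2cycle_new(10, clk));
	printf("25 ns: old=%d new=%d\n", ns2cycle_old(25, clk), ns2cycle_new(25, clk));
	return 0;
}
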
include/linux/mm_types.h

@@ -134,7 +134,7 @@ struct vm_area_struct {
 					   within vm_mm. */
 
 	/* linked list of VM areas per task, sorted by address */
-	struct vm_area_struct *vm_next;
+	struct vm_area_struct *vm_next, *vm_prev;
 
 	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
 	unsigned long vm_flags;		/* Flags, see mm.h. */

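The single forward pointer becomes a doubly linked list, which is what lets the later hunks (mm/memory.c, mm/mlock.c) inspect the mapping below a stack in O(1). A minimal userspace sketch of the insert helper that the mm/mmap.c hunk below rewrites, with simplified stand-in types and a hypothetical name:

struct vma {			/* stand-in for struct vm_area_struct */
	struct vma *vm_next, *vm_prev;
};

struct mm {			/* stand-in for struct mm_struct */
	struct vma *mmap;	/* head of the address-ordered list */
};

/* Link vma into the list after prev (a NULL prev means new head),
 * keeping both directions consistent - the same shape as the
 * reworked __vma_link_list(). */
void link_after(struct mm *mm, struct vma *vma, struct vma *prev)
{
	struct vma *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = mm->mmap;
		mm->mmap = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}
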
include/trace/events/workqueue.h (new file, 62 lines)

@@ -0,0 +1,62 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM workqueue
+
+#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WORKQUEUE_H
+
+#include <linux/tracepoint.h>
+#include <linux/workqueue.h>
+
+/**
+ * workqueue_execute_start - called immediately before the workqueue callback
+ * @work:	pointer to struct work_struct
+ *
+ * Allows to track workqueue execution.
+ */
+TRACE_EVENT(workqueue_execute_start,
+
+	TP_PROTO(struct work_struct *work),
+
+	TP_ARGS(work),
+
+	TP_STRUCT__entry(
+		__field( void *,	work	)
+		__field( void *,	function)
+	),
+
+	TP_fast_assign(
+		__entry->work		= work;
+		__entry->function	= work->func;
+	),
+
+	TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
+);
+
+/**
+ * workqueue_execute_end - called immediately after the workqueue callback
+ * @work:	pointer to struct work_struct
+ *
+ * Allows to track workqueue execution.
+ */
+TRACE_EVENT(workqueue_execute_end,
+
+	TP_PROTO(struct work_struct *work),
+
+	TP_ARGS(work),
+
+	TP_STRUCT__entry(
+		__field( void *,	work	)
+	),
+
+	TP_fast_assign(
+		__entry->work		= work;
+	),
+
+	TP_printk("work struct %p", __entry->work)
+);
+
+
+#endif /* _TRACE_WORKQUEUE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>

kernel/fork.c

@@ -300,7 +300,7 @@ out:
 #ifdef CONFIG_MMU
 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-	struct vm_area_struct *mpnt, *tmp, **pprev;
+	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
 	struct rb_node **rb_link, *rb_parent;
 	int retval;
 	unsigned long charge;
@@ -328,6 +328,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	if (retval)
 		goto out;
 
+	prev = NULL;
 	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
 		struct file *file;
 
@@ -359,7 +360,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 			goto fail_nomem_anon_vma_fork;
 		tmp->vm_flags &= ~VM_LOCKED;
 		tmp->vm_mm = mm;
-		tmp->vm_next = NULL;
+		tmp->vm_next = tmp->vm_prev = NULL;
 		file = tmp->vm_file;
 		if (file) {
 			struct inode *inode = file->f_path.dentry->d_inode;
@@ -392,6 +393,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		 */
 		*pprev = tmp;
 		pprev = &tmp->vm_next;
+		tmp->vm_prev = prev;
+		prev = tmp;
 
 		__vma_link_rb(mm, tmp, rb_link, rb_parent);
 		rb_link = &tmp->vm_rb.rb_right;

kernel/workqueue.c

@@ -35,6 +35,9 @@
 #include <linux/lockdep.h>
 #include <linux/idr.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/workqueue.h>
+
 #include "workqueue_sched.h"
 
 enum {
@@ -1790,7 +1793,13 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 	work_clear_pending(work);
 	lock_map_acquire(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
+	trace_workqueue_execute_start(work);
 	f(work);
+	/*
+	 * While we must be careful to not use "work" after this, the trace
+	 * point will only record its address.
+	 */
+	trace_workqueue_execute_end(work);
 	lock_map_release(&lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);

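The CREATE_TRACE_POINTS dance above is easy to misread: any interested file includes the event header and gets declarations only, while exactly one translation unit emits the tracepoint definitions by setting the macro before the include. A schematic of the pattern (the consumer file name is illustrative; kernel/workqueue.c is from the diff):

/* some-consumer.c - any number of files: declarations only */
#include <trace/events/workqueue.h>

/* kernel/workqueue.c - exactly one file in the tree defines the
 * tracepoints, by defining CREATE_TRACE_POINTS before including
 * the very same header: */
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>
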
mm/memory.c (15 lines)

@@ -2770,11 +2770,18 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
 {
 	address &= PAGE_MASK;
 	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-		address -= PAGE_SIZE;
-		if (find_vma(vma->vm_mm, address) != vma)
-			return -ENOMEM;
+		struct vm_area_struct *prev = vma->vm_prev;
+
+		/*
+		 * Is there a mapping abutting this one below?
+		 *
+		 * That's only ok if it's the same stack mapping
+		 * that has gotten split..
+		 */
+		if (prev && prev->vm_end == address)
+			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
 
-		expand_stack(vma, address);
+		expand_stack(vma, address - PAGE_SIZE);
 	}
 	return 0;
 }

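The old check walked the rbtree via find_vma() and could not tell "the guard page is free" apart from "a different mapping sits directly below the stack". A made-up address layout shows what the new O(1) vm_prev test catches:

/*
 * Hypothetical layout (addresses invented for illustration):
 *
 *   prev: [0x00001000, 0x00002000)   plain mmap(), not VM_GROWSDOWN
 *   vma:  [0x00002000, 0x00009000)   stack, VM_GROWSDOWN
 *
 * A fault at 0x2abc masks down to address == 0x2000 == vma->vm_start,
 * so the guard logic runs.  prev->vm_end == 0x2000 == address: a
 * foreign mapping abuts the stack from below, and the fault now fails
 * with -ENOMEM instead of expanding the stack over it.  Had prev also
 * been VM_GROWSDOWN (the same stack, split), the check returns 0 and
 * expand_stack() grows the stack by one page as before.
 */
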
mm/mlock.c (19 lines)

@@ -135,6 +135,19 @@ void munlock_vma_page(struct page *page)
 	}
 }
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
+static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	return (vma->vm_flags & VM_GROWSDOWN) &&
+		(vma->vm_start == addr) &&
+		!vma_stack_continue(vma->vm_prev, addr);
+}
+
 /**
  * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
  * @vma:   target vma
@@ -168,12 +181,10 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 		gup_flags |= FOLL_WRITE;
 
 	/* We don't try to access the guard page of a stack vma */
-	if (vma->vm_flags & VM_GROWSDOWN) {
-		if (start == vma->vm_start) {
-			start += PAGE_SIZE;
-			nr_pages--;
-		}
+	if (stack_guard_page(vma, start)) {
+		addr += PAGE_SIZE;
+		nr_pages--;
 	}
 
 	while (nr_pages > 0) {
 		int i;

mm/mmap.c (21 lines)

@@ -388,17 +388,23 @@ static inline void
 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct vm_area_struct *prev, struct rb_node *rb_parent)
 {
+	struct vm_area_struct *next;
+
+	vma->vm_prev = prev;
 	if (prev) {
-		vma->vm_next = prev->vm_next;
+		next = prev->vm_next;
 		prev->vm_next = vma;
 	} else {
 		mm->mmap = vma;
 		if (rb_parent)
-			vma->vm_next = rb_entry(rb_parent,
+			next = rb_entry(rb_parent,
 					struct vm_area_struct, vm_rb);
 		else
-			vma->vm_next = NULL;
+			next = NULL;
 	}
+	vma->vm_next = next;
+	if (next)
+		next->vm_prev = vma;
 }
 
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -483,7 +489,11 @@ static inline void
 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct vm_area_struct *prev)
 {
-	prev->vm_next = vma->vm_next;
+	struct vm_area_struct *next = vma->vm_next;
+
+	prev->vm_next = next;
+	if (next)
+		next->vm_prev = prev;
 	rb_erase(&vma->vm_rb, &mm->mm_rb);
 	if (mm->mmap_cache == vma)
 		mm->mmap_cache = prev;
@@ -1915,6 +1925,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long addr;
 
 	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+	vma->vm_prev = NULL;
 	do {
 		rb_erase(&vma->vm_rb, &mm->mm_rb);
 		mm->map_count--;
@@ -1922,6 +1933,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 		vma = vma->vm_next;
 	} while (vma && vma->vm_start < end);
 	*insertion_point = vma;
+	if (vma)
+		vma->vm_prev = prev;
 	tail_vma->vm_next = NULL;
 	if (mm->unmap_area == arch_unmap_area)
 		addr = prev ? prev->vm_end : mm->mmap_base;

mm/nommu.c

@@ -604,7 +604,7 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
  */
 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-	struct vm_area_struct *pvma, **pp;
+	struct vm_area_struct *pvma, **pp, *next;
 	struct address_space *mapping;
 	struct rb_node **p, *parent;
 
@@ -664,8 +664,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 			break;
 	}
 
-	vma->vm_next = *pp;
+	next = *pp;
 	*pp = vma;
+	vma->vm_next = next;
+	if (next)
+		next->vm_prev = vma;
 }
 
 /*