Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6

Steve French 2009-07-18 03:13:38 +00:00
commit 287638b2c5
36 changed files with 175 additions and 88 deletions

View file

@@ -73,7 +73,7 @@ The remaining CPU time will be used for user input and other tasks. Because
 realtime tasks have explicitly allocated the CPU time they need to perform
 their tasks, buffer underruns in the graphics or audio can be eliminated.
-NOTE: the above example is not fully implemented as of yet (2.6.25). We still
+NOTE: the above example is not fully implemented yet. We still
 lack an EDF scheduler to make non-uniform periods usable.
@@ -140,14 +140,15 @@ The other option is:
 .o CONFIG_CGROUP_SCHED (aka "Basis for grouping tasks" = "Control groups")
-This uses the /cgroup virtual file system and "/cgroup/<cgroup>/cpu.rt_runtime_us"
-to control the CPU time reserved for each control group instead.
+This uses the /cgroup virtual file system and
+"/cgroup/<cgroup>/cpu.rt_runtime_us" to control the CPU time reserved for each
+control group instead.
 For more information on working with control groups, you should read
 Documentation/cgroups/cgroups.txt as well.
-Group settings are checked against the following limits in order to keep the configuration
-schedulable:
+Group settings are checked against the following limits in order to keep the
+configuration schedulable:
   \Sum_{i} runtime_{i} / global_period <= global_runtime / global_period
@@ -189,7 +190,7 @@ Implementing SCHED_EDF might take a while to complete. Priority Inheritance is
 the biggest challenge as the current linux PI infrastructure is geared towards
 the limited static priority levels 0-99. With deadline scheduling you need to
 do deadline inheritance (since priority is inversely proportional to the
-deadline delta (deadline - now).
+deadline delta (deadline - now)).
 This means the whole PI machinery will have to be reworked - and that is one of
 the most complex pieces of code we have.
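As a quick illustration of the limit quoted in this hunk (numbers are invented for the example, not taken from the commit): with a global period of 1000000 us and a global runtime of 950000 us, three groups reserving 100000, 200000 and 300000 us each would be accepted, since (100000 + 200000 + 300000) / 1000000 = 0.6 <= 950000 / 1000000 = 0.95; adding a fourth group that asks for another 400000 us would push the sum to 1.0 and fail the check.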

View file

@@ -343,7 +343,8 @@ KBUILD_CPPFLAGS := -D__KERNEL__
 KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
                    -fno-strict-aliasing -fno-common \
                    -Werror-implicit-function-declaration \
-                   -Wno-format-security
+                   -Wno-format-security \
+                   -fno-delete-null-pointer-checks
 KBUILD_AFLAGS   := -D__ASSEMBLY__
 # Read KERNELRELEASE from include/config/kernel.release (if it exists)

View file

@@ -6,6 +6,8 @@
  * David Mosberger-Tang <davidm@hpl.hp.com>
  */
+#include <linux/types.h>
+
 /* floating point status register: */
 #define FPSR_TRAP_VD (1 << 0) /* invalid op trap disabled */
 #define FPSR_TRAP_DD (1 << 1) /* denormal trap disabled */

View file

@@ -33,6 +33,7 @@
 #ifndef _ASM_IA64_XEN_HYPERVISOR_H
 #define _ASM_IA64_XEN_HYPERVISOR_H
+#include <linux/err.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/version.h> /* to compile feature.c */
 #include <xen/features.h> /* to comiple xen-netfront.c */

View file

@@ -6,6 +6,14 @@ int iommu_detected __read_mostly;
 struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init dma_init(void)
+{
+        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+        return 0;
+}
+fs_initcall(dma_init);
 struct dma_map_ops *dma_get_ops(struct device *dev)
 {
         return dma_ops;

View file

@@ -736,15 +736,16 @@ void user_disable_single_step(struct task_struct *task)
 {
         struct pt_regs *regs = task->thread.regs;
-#if defined(CONFIG_BOOKE)
-        /* If DAC then do not single step, skip */
-        if (task->thread.dabr)
-                return;
-#endif
         if (regs != NULL) {
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+#if defined(CONFIG_BOOKE)
+                /* If DAC don't clear DBCRO_IDM or MSR_DE */
+                if (task->thread.dabr)
+                        task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT);
+                else {
+                        task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM);
+                        regs->msr &= ~MSR_DE;
+                }
+#elif defined(CONFIG_40x)
                 task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM);
                 regs->msr &= ~MSR_DE;
 #else

View file

@@ -80,10 +80,10 @@ _GLOBAL(load_up_altivec)
         mtvscr vr0
         REST_32VRS(0,r4,r5)
 #ifndef CONFIG_SMP
-        /* Update last_task_used_math to 'current' */
+        /* Update last_task_used_altivec to 'current' */
         subi r4,r5,THREAD /* Back to 'current' */
         fromreal(r4)
-        PPC_STL r4,ADDROFF(last_task_used_math)(r3)
+        PPC_STL r4,ADDROFF(last_task_used_altivec)(r3)
 #endif /* CONFIG_SMP */
         /* restore registers and return */
         blr
@@ -172,7 +172,7 @@ _GLOBAL(load_up_vsx)
         oris r12,r12,MSR_VSX@h
         std r12,_MSR(r1)
 #ifndef CONFIG_SMP
-        /* Update last_task_used_math to 'current' */
+        /* Update last_task_used_vsx to 'current' */
         ld r4,PACACURRENT(r13)
         std r4,0(r3)
 #endif /* CONFIG_SMP */

View file

@@ -161,6 +161,7 @@ extern int io_apic_set_pci_routing(struct device *dev, int irq,
                                    struct io_apic_irq_attr *irq_attr);
 extern int (*ioapic_renumber_irq)(int ioapic, int irq);
 extern void ioapic_init_mappings(void);
+extern void ioapic_insert_resources(void);
 extern struct IO_APIC_route_entry **alloc_ioapic_entries(void);
 extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries);
@@ -180,6 +181,7 @@ extern void ioapic_write_entry(int apic, int pin,
 #define io_apic_assign_pci_irqs 0
 static const int timer_through_8259 = 0;
 static inline void ioapic_init_mappings(void) { }
+static inline void ioapic_insert_resources(void) { }
 static inline void probe_nr_irqs_gsi(void) { }
 #endif

View file

@@ -30,7 +30,7 @@
 #include <asm/hw_irq.h>
 #include <asm/kvm_para.h>
-/*G:031 But first, how does our Guest contact the Host to ask for privileged
+/*G:030 But first, how does our Guest contact the Host to ask for privileged
  * operations? There are two ways: the direct way is to make a "hypercall",
  * to make requests of the Host Itself.
  *

View file

@@ -4181,28 +4181,20 @@ fake_ioapic_page:
         }
 }
-static int __init ioapic_insert_resources(void)
+void __init ioapic_insert_resources(void)
 {
         int i;
         struct resource *r = ioapic_resources;
         if (!r) {
-                if (nr_ioapics > 0) {
+                if (nr_ioapics > 0)
                         printk(KERN_ERR
                                 "IO APIC resources couldn't be allocated.\n");
-                        return -1;
-                }
-                return 0;
+                return;
         }
         for (i = 0; i < nr_ioapics; i++) {
                 insert_resource(&iomem_resource, r);
                 r++;
         }
-        return 0;
 }
-
-/* Insert the IO APIC resources after PCI initialization has occured to handle
- * IO APICS that are mapped in on a BAR in PCI space. */
-late_initcall(ioapic_insert_resources);

View file

@@ -379,6 +379,11 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
         native_cpuid(ax, bx, cx, dx);
         switch (function) {
+        case 0: /* ID and highest CPUID. Futureproof a little by sticking to
+                 * older ones. */
+                if (*ax > 5)
+                        *ax = 5;
+                break;
         case 1: /* Basic feature request. */
                 /* We only allow kernel to see SSE3, CMPXCHG16B and SSSE3 */
                 *cx &= 0x00002201;
@@ -1079,7 +1084,7 @@ static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
         return insn_len;
 }
-/*G:030 Once we get to lguest_init(), we know we're a Guest. The various
+/*G:029 Once we get to lguest_init(), we know we're a Guest. The various
  * pv_ops structures in the kernel provide points for (almost) every routine we
  * have to override to avoid privileged instructions. */
 __init void lguest_init(void)

View file

@@ -35,6 +35,7 @@
 #include <asm/pat.h>
 #include <asm/e820.h>
 #include <asm/pci_x86.h>
+#include <asm/io_apic.h>
 static int
@@ -227,6 +228,12 @@ void __init pcibios_resource_survey(void)
         pcibios_allocate_resources(1);
         e820_reserve_resources_late();
+        /*
+         * Insert the IO APIC resources after PCI initialization has
+         * occured to handle IO APICS that are mapped in on a BAR in
+         * PCI space, but before trying to assign unassigned pci res.
+         */
+        ioapic_insert_resources();
 }
 /**

View file

@@ -213,7 +213,7 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
          * Only allow the generic SCSI ioctls if the host can support it.
          */
         if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
-                return -ENOIOCTLCMD;
+                return -ENOTTY;
         return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
 }
@@ -360,6 +360,9 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
         blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2);
         blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2);
+        /* No need to bounce any requests */
+        blk_queue_bounce_limit(vblk->disk->queue, BLK_BOUNCE_ANY);
+
         /* No real sector limit. */
         blk_queue_max_sectors(vblk->disk->queue, -1U);

View file

@@ -1331,9 +1331,6 @@ handle_newline:
 static void n_tty_write_wakeup(struct tty_struct *tty)
 {
-        /* Write out any echoed characters that are still pending */
-        process_echoes(tty);
-
         if (tty->fasync && test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags))
                 kill_fasync(&tty->fasync, SIGIO, POLL_OUT);
 }

View file

@@ -828,7 +828,7 @@ static int receive_data(enum port_type index, struct nozomi *dc)
         struct port *port = &dc->port[index];
         void __iomem *addr = port->dl_addr[port->toggle_dl];
         struct tty_struct *tty = tty_port_tty_get(&port->port);
-        int i;
+        int i, ret;
         if (unlikely(!tty)) {
                 DBG1("tty not open for port: %d?", index);
@@ -844,12 +844,14 @@ static int receive_data(enum port_type index, struct nozomi *dc)
                 /* disable interrupt in downlink... */
                 disable_transmit_dl(index, dc);
-                return 0;
+                ret = 0;
+                goto put;
         }
         if (unlikely(size == 0)) {
                 dev_err(&dc->pdev->dev, "size == 0?\n");
-                return 1;
+                ret = 1;
+                goto put;
         }
         tty_buffer_request_room(tty, size);
@@ -871,8 +873,10 @@ static int receive_data(enum port_type index, struct nozomi *dc)
         }
         set_bit(index, &dc->flip);
+        ret = 1;
+put:
         tty_kref_put(tty);
-        return 1;
+        return ret;
 }
 /* Debug for interrupts */

View file

@@ -790,17 +790,20 @@ void tty_ldisc_hangup(struct tty_struct *tty)
          * N_TTY.
          */
         if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
-                /* Avoid racing set_ldisc */
+                /* Avoid racing set_ldisc or tty_ldisc_release */
                 mutex_lock(&tty->ldisc_mutex);
-                /* Switch back to N_TTY */
-                tty_ldisc_halt(tty);
-                tty_ldisc_wait_idle(tty);
-                tty_ldisc_reinit(tty);
-                /* At this point we have a closed ldisc and we want to
-                   reopen it. We could defer this to the next open but
-                   it means auditing a lot of other paths so this is a FIXME */
-                WARN_ON(tty_ldisc_open(tty, tty->ldisc));
-                tty_ldisc_enable(tty);
+                if (tty->ldisc) {       /* Not yet closed */
+                        /* Switch back to N_TTY */
+                        tty_ldisc_halt(tty);
+                        tty_ldisc_wait_idle(tty);
+                        tty_ldisc_reinit(tty);
+                        /* At this point we have a closed ldisc and we want to
+                           reopen it. We could defer this to the next open but
+                           it means auditing a lot of other paths so this is
+                           a FIXME */
+                        WARN_ON(tty_ldisc_open(tty, tty->ldisc));
+                        tty_ldisc_enable(tty);
+                }
                 mutex_unlock(&tty->ldisc_mutex);
                 tty_reset_termios(tty);
         }
@@ -865,6 +868,7 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
         tty_ldisc_wait_idle(tty);
+        mutex_lock(&tty->ldisc_mutex);
         /*
          * Now kill off the ldisc
          */
@@ -875,6 +879,7 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
         /* Ensure the next open requests the N_TTY ldisc */
         tty_set_termios_ldisc(tty, N_TTY);
+        mutex_unlock(&tty->ldisc_mutex);
         /* This will need doing differently if we need to lock */
         if (o_tty)

View file

@@ -267,7 +267,7 @@ int tty_port_block_til_ready(struct tty_port *port,
         if (retval == 0)
                 port->flags |= ASYNC_NORMAL_ACTIVE;
         spin_unlock_irqrestore(&port->lock, flags);
-        return 0;
+        return retval;
 }
 EXPORT_SYMBOL(tty_port_block_til_ready);

View file

@@ -770,14 +770,12 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
         visual_init(vc, currcons, 1);
         if (!*vc->vc_uni_pagedir_loc)
                 con_set_default_unimap(vc);
-        if (!vc->vc_kmalloced)
-                vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
+        vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
         if (!vc->vc_screenbuf) {
                 kfree(vc);
                 vc_cons[currcons].d = NULL;
                 return -ENOMEM;
         }
-        vc->vc_kmalloced = 1;
         vc_init(vc, vc->vc_rows, vc->vc_cols, 1);
         vcs_make_sysfs(currcons);
         atomic_notifier_call_chain(&vt_notifier_list, VT_ALLOCATE, &param);
@@ -913,10 +911,8 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
         if (new_scr_end > new_origin)
                 scr_memsetw((void *)new_origin, vc->vc_video_erase_char,
                             new_scr_end - new_origin);
-        if (vc->vc_kmalloced)
-                kfree(vc->vc_screenbuf);
+        kfree(vc->vc_screenbuf);
         vc->vc_screenbuf = newscreen;
-        vc->vc_kmalloced = 1;
         vc->vc_screenbuf_size = new_screen_size;
         set_origin(vc);
@@ -995,8 +991,7 @@ void vc_deallocate(unsigned int currcons)
         vc->vc_sw->con_deinit(vc);
         put_pid(vc->vt_pid);
         module_put(vc->vc_sw->owner);
-        if (vc->vc_kmalloced)
-                kfree(vc->vc_screenbuf);
+        kfree(vc->vc_screenbuf);
         if (currcons >= MIN_NR_CONSOLES)
                 kfree(vc);
         vc_cons[currcons].d = NULL;
@@ -2881,7 +2876,6 @@ static int __init con_init(void)
         INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
         visual_init(vc, currcons, 1);
         vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT);
-        vc->vc_kmalloced = 0;
         vc_init(vc, vc->vc_rows, vc->vc_cols,
                 currcons || !vc->vc_sw->con_save_screen);
         }

View file

@@ -38,8 +38,6 @@ struct lguest_pages
 #define CHANGED_GDT_TLS 4 /* Actually a subset of CHANGED_GDT */
 #define CHANGED_ALL 3
-struct lguest;
-
 struct lg_cpu {
         unsigned int id;
         struct lguest *lg;

View file

@@ -356,6 +356,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
         if (!skb_queue_empty(&ap->rqueue))
                 tasklet_schedule(&ap->tsk);
         ap_put(ap);
+        tty_unthrottle(tty);
 }
 static void

View file

@@ -397,6 +397,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
         if (!skb_queue_empty(&ap->rqueue))
                 tasklet_schedule(&ap->tsk);
         sp_put(ap);
+        tty_unthrottle(tty);
 }
 static void

View file

@@ -236,7 +236,6 @@ static int sport_startup(struct uart_port *port)
         int retval;
         pr_debug("%s enter\n", __func__);
-        memset(buffer, 20, '\0');
         snprintf(buffer, 20, "%s rx", up->name);
         retval = request_irq(up->rx_irq, sport_uart_rx_irq, IRQF_SAMPLE_RANDOM, buffer, up);
         if (retval) {

View file

@@ -730,7 +730,6 @@ static int __devexit msm_serial_remove(struct platform_device *pdev)
 }
 static struct platform_driver msm_platform_driver = {
-        .probe = msm_serial_probe,
         .remove = msm_serial_remove,
         .driver = {
                 .name = "msm_serial",

View file

@@ -669,7 +669,7 @@ static int __init virtio_pci_init(void)
         err = pci_register_driver(&virtio_pci_driver);
         if (err)
-                device_unregister(virtio_pci_root);
+                root_device_unregister(virtio_pci_root);
         return err;
 }

View file

@@ -89,7 +89,6 @@ struct vc_data {
         unsigned int vc_need_wrap : 1;
         unsigned int vc_can_do_color : 1;
         unsigned int vc_report_mouse : 2;
-        unsigned int vc_kmalloced : 1;
         unsigned char vc_utf : 1; /* Unicode UTF-8 encoding */
         unsigned char vc_utf_count;
         int vc_utf_char;

View file

@@ -448,7 +448,7 @@ extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
 static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
 {
-        if (likely(!timer->start_pid))
+        if (likely(!timer->start_site))
                 return;
         timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
                                  timer->function, timer->start_comm, 0);

View file

@@ -11,7 +11,7 @@
 #define LG_CLOCK_MIN_DELTA 100UL
 #define LG_CLOCK_MAX_DELTA ULONG_MAX
-/*G:032 The second method of communicating with the Host is to via "struct
+/*G:031 The second method of communicating with the Host is to via "struct
  * lguest_data". Once the Guest's initialization hypercall tells the Host where
  * this is, the Guest and Host both publish information in it. :*/
 struct lguest_data

View file

@@ -27,6 +27,7 @@
 #define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */
 #define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */
 #define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */
+#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */
 #define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
@@ -81,14 +82,19 @@ typedef __u8 virtio_net_ctrl_ack;
 #define VIRTIO_NET_ERR 1
 /*
- * Control the RX mode, ie. promisucous and allmulti. PROMISC and
- * ALLMULTI commands require an "out" sg entry containing a 1 byte
- * state value, zero = disable, non-zero = enable. These commands
- * are supported with the VIRTIO_NET_F_CTRL_RX feature.
+ * Control the RX mode, ie. promisucous, allmulti, etc...
+ * All commands require an "out" sg entry containing a 1 byte
+ * state value, zero = disable, non-zero = enable. Commands
+ * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
+ * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
  */
 #define VIRTIO_NET_CTRL_RX 0
 #define VIRTIO_NET_CTRL_RX_PROMISC 0
 #define VIRTIO_NET_CTRL_RX_ALLMULTI 1
+#define VIRTIO_NET_CTRL_RX_ALLUNI 2
+#define VIRTIO_NET_CTRL_RX_NOMULTI 3
+#define VIRTIO_NET_CTRL_RX_NOUNI 4
+#define VIRTIO_NET_CTRL_RX_NOBCAST 5
 /*
  * Control the MAC filter table.
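The comment in this hunk describes the wire format but not how a driver fills it in. A minimal sketch (illustrative only, not part of this commit; the helper name is made up) of laying out an "enable all-unicast" request with the header struct defined in this file:

    /*
     * Hypothetical helper, not from this commit: build the sg list for a
     * VIRTIO_NET_CTRL_RX_ALLUNI request. Per the comment above, the
     * class/cmd header is followed by one "out" byte, non-zero = enable.
     */
    #include <linux/scatterlist.h>
    #include <linux/virtio_net.h>

    static void example_build_alluni_cmd(struct scatterlist sg[2], __u8 *state)
    {
            static struct virtio_net_ctrl_hdr hdr = {
                    .class = VIRTIO_NET_CTRL_RX,
                    .cmd = VIRTIO_NET_CTRL_RX_ALLUNI,
            };

            *state = 1;                                /* non-zero = enable */
            sg_init_table(sg, 2);
            sg_set_buf(&sg[0], &hdr, sizeof(hdr));     /* command header */
            sg_set_buf(&sg[1], state, sizeof(*state)); /* 1-byte state value */
    }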

View file

@@ -493,6 +493,7 @@ struct rt_rq {
 #endif
 #ifdef CONFIG_SMP
         unsigned long rt_nr_migratory;
+        unsigned long rt_nr_total;
         int overloaded;
         struct plist_head pushable_tasks;
 #endif
@@ -2571,15 +2572,37 @@ static void __sched_fork(struct task_struct *p)
         p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
 #ifdef CONFIG_SCHEDSTATS
         p->se.wait_start = 0;
-        p->se.sum_sleep_runtime = 0;
-        p->se.sleep_start = 0;
-        p->se.block_start = 0;
-        p->se.sleep_max = 0;
-        p->se.block_max = 0;
-        p->se.exec_max = 0;
-        p->se.slice_max = 0;
-        p->se.wait_max = 0;
+        p->se.wait_max = 0;
+        p->se.wait_count = 0;
+        p->se.wait_sum = 0;
+
+        p->se.sleep_start = 0;
+        p->se.sleep_max = 0;
+        p->se.sum_sleep_runtime = 0;
+
+        p->se.block_start = 0;
+        p->se.block_max = 0;
+        p->se.exec_max = 0;
+        p->se.slice_max = 0;
+
+        p->se.nr_migrations_cold = 0;
+        p->se.nr_failed_migrations_affine = 0;
+        p->se.nr_failed_migrations_running = 0;
+        p->se.nr_failed_migrations_hot = 0;
+        p->se.nr_forced_migrations = 0;
+        p->se.nr_forced2_migrations = 0;
+
+        p->se.nr_wakeups = 0;
+        p->se.nr_wakeups_sync = 0;
+        p->se.nr_wakeups_migrate = 0;
+        p->se.nr_wakeups_local = 0;
+        p->se.nr_wakeups_remote = 0;
+        p->se.nr_wakeups_affine = 0;
+        p->se.nr_wakeups_affine_attempts = 0;
+        p->se.nr_wakeups_passive = 0;
+        p->se.nr_wakeups_idle = 0;
 #endif
         INIT_LIST_HEAD(&p->rt.run_list);
@@ -9074,7 +9097,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 #ifdef CONFIG_SMP
         rt_rq->rt_nr_migratory = 0;
         rt_rq->overloaded = 0;
-        plist_head_init(&rq->rt.pushable_tasks, &rq->lock);
+        plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
 #endif
         rt_rq->rt_time = 0;

View file

@@ -687,7 +687,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
          * all of which have the same weight.
          */
         if (sched_feat(NORMALIZED_SLEEPER) &&
-                        task_of(se)->policy != SCHED_IDLE)
+                        (!entity_is_task(se) ||
+                         task_of(se)->policy != SCHED_IDLE))
                 thresh = calc_delta_fair(thresh, se);
         vruntime -= thresh;

View file

@@ -10,6 +10,8 @@ static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 #ifdef CONFIG_RT_GROUP_SCHED
+#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
+
 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 {
         return rt_rq->rq;
@@ -22,6 +24,8 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 #else /* CONFIG_RT_GROUP_SCHED */
+#define rt_entity_is_task(rt_se) (1)
+
 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 {
         return container_of(rt_rq, struct rq, rt);
@@ -73,7 +77,7 @@ static inline void rt_clear_overload(struct rq *rq)
 static void update_rt_migration(struct rt_rq *rt_rq)
 {
-        if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
+        if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
                 if (!rt_rq->overloaded) {
                         rt_set_overload(rq_of_rt_rq(rt_rq));
                         rt_rq->overloaded = 1;
@@ -86,6 +90,12 @@ static void update_rt_migration(struct rt_rq *rt_rq)
 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+        if (!rt_entity_is_task(rt_se))
+                return;
+
+        rt_rq = &rq_of_rt_rq(rt_rq)->rt;
+
+        rt_rq->rt_nr_total++;
         if (rt_se->nr_cpus_allowed > 1)
                 rt_rq->rt_nr_migratory++;
@@ -94,6 +104,12 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+        if (!rt_entity_is_task(rt_se))
+                return;
+
+        rt_rq = &rq_of_rt_rq(rt_rq)->rt;
+
+        rt_rq->rt_nr_total--;
         if (rt_se->nr_cpus_allowed > 1)
                 rt_rq->rt_nr_migratory--;

View file

@@ -363,7 +363,7 @@ ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
  out_reg:
         ret = register_ftrace_function_probe(glob, ops, count);
-        return ret;
+        return ret < 0 ? ret : 0;
 }
 static struct ftrace_func_command ftrace_traceon_cmd = {

View file

@@ -75,7 +75,7 @@ int __pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream)
 {
         struct pxa2xx_runtime_data *rtd = substream->runtime->private_data;
-        if (rtd && rtd->params)
+        if (rtd && rtd->params && rtd->params->drcmr)
                 *rtd->params->drcmr = 0;
         snd_pcm_set_runtime_buffer(substream, NULL);

View file

@@ -4505,6 +4505,12 @@ static int alc880_parse_auto_config(struct hda_codec *codec)
                                                    &dig_nid, 1);
                 if (err < 0)
                         continue;
+                if (dig_nid > 0x7f) {
+                        printk(KERN_ERR "alc880_auto: invalid dig_nid "
+                               "connection 0x%x for NID 0x%x\n", dig_nid,
+                               spec->autocfg.dig_out_pins[i]);
+                        continue;
+                }
                 if (!i)
                         spec->multiout.dig_out_nid = dig_nid;
                 else {

View file

@@ -2197,9 +2197,12 @@ static int __init alsa_card_riptide_init(void)
         if (err < 0)
                 return err;
 #if defined(SUPPORT_JOYSTICK)
-        pci_register_driver(&joystick_driver);
+        err = pci_register_driver(&joystick_driver);
+        /* On failure unregister formerly registered audio driver */
+        if (err < 0)
+                pci_unregister_driver(&driver);
 #endif
-        return 0;
+        return err;
 }
 static void __exit alsa_card_riptide_exit(void)

View file

@@ -2661,7 +2661,7 @@ static int parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
         struct usb_interface_descriptor *altsd;
         int i, altno, err, stream;
         int format;
-        struct audioformat *fp;
+        struct audioformat *fp = NULL;
         unsigned char *fmt, *csep;
         int num;
@@ -2734,6 +2734,18 @@ static int parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
                         continue;
                 }
+                /*
+                 * Blue Microphones workaround: The last altsetting is identical
+                 * with the previous one, except for a larger packet size, but
+                 * is actually a mislabeled two-channel setting; ignore it.
+                 */
+                if (fmt[4] == 1 && fmt[5] == 2 && altno == 2 && num == 3 &&
+                    fp && fp->altsetting == 1 && fp->channels == 1 &&
+                    fp->format == SNDRV_PCM_FORMAT_S16_LE &&
+                    le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize) ==
+                                                        fp->maxpacksize * 2)
+                        continue;
+
                 csep = snd_usb_find_desc(alts->endpoint[0].extra, alts->endpoint[0].extralen, NULL, USB_DT_CS_ENDPOINT);
                 /* Creamware Noah has this descriptor after the 2nd endpoint */
                 if (!csep && altsd->bNumEndpoints >= 2)