Merge branch 'for_2.6.39/pm-misc' of ssh://master.kernel.org/pub/scm/linux/kernel/git/khilman/linux-omap-pm into omap-for-linus

Tony Lindgren 2011-03-10 18:54:14 -08:00
commit 94a06b74e7
212 changed files with 1768 additions and 1126 deletions


@@ -40,8 +40,6 @@ decnet.txt
     - info on using the DECnet networking layer in Linux.
 depca.txt
     - the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
-dgrs.txt
-    - the Digi International RightSwitch SE-X Ethernet driver
 dmfe.txt
     - info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
 e100.txt
@@ -50,8 +48,6 @@ e1000.txt
     - info on Intel's E1000 line of gigabit ethernet boards
 eql.txt
     - serial IP load balancing
-ethertap.txt
-    - the Ethertap user space packet reception and transmission driver
 ewrk3.txt
     - the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
 filter.txt
@@ -104,8 +100,6 @@ tuntap.txt
     - TUN/TAP device driver, allowing user space Rx/Tx of packets.
 vortex.txt
     - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
-wavelan.txt
-    - AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
 x25.txt
     - general info on X.25 development.
 x25-iface.txt


@@ -61,7 +61,6 @@ before the more general line given above as the first match is the one taken.
 create dns_resolver foo:* * /usr/sbin/dns.foo %k
 =====
 USAGE
 =====
@@ -104,6 +103,14 @@ implemented in the module can be called after doing:
 returned also.
+===============================
+READING DNS KEYS FROM USERSPACE
+===============================
+Keys of dns_resolver type can be read from userspace using keyctl_read() or
+"keyctl read/print/pipe".
 =========
 MECHANISM
 =========


@@ -1010,6 +1010,15 @@ L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S: Maintained
 F: arch/arm/mach-s5p*/
+ARM/SAMSUNG MOBILE MACHINE SUPPORT
+M: Kyungmin Park <kyungmin.park@samsung.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-s5pv210/mach-aquila.c
+F: arch/arm/mach-s5pv210/mach-goni.c
+F: arch/arm/mach-exynos4/mach-universal_c210.c
+F: arch/arm/mach-exynos4/mach-nuri.c
 ARM/SAMSUNG S5P SERIES FIMC SUPPORT
 M: Kyungmin Park <kyungmin.park@samsung.com>
 M: Sylwester Nawrocki <s.nawrocki@samsung.com>
@@ -1467,6 +1476,7 @@ F: include/net/bluetooth/
 BONDING DRIVER
 M: Jay Vosburgh <fubar@us.ibm.com>
+M: Andy Gospodarek <andy@greyhouse.net>
 L: netdev@vger.kernel.org
 W: http://sourceforge.net/projects/bonding/
 S: Supported
@@ -2033,7 +2043,7 @@ F: Documentation/scsi/dc395x.txt
 F: drivers/scsi/dc395x.*
 DCCP PROTOCOL
-M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+M: Gerrit Renker <gerrit@erg.abdn.ac.uk>
 L: dccp@vger.kernel.org
 W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
 S: Maintained
@@ -3519,7 +3529,7 @@ F: drivers/hwmon/jc42.c
 F: Documentation/hwmon/jc42
 JFS FILESYSTEM
-M: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
+M: Dave Kleikamp <shaggy@kernel.org>
 L: jfs-discussion@lists.sourceforge.net
 W: http://jfs.sourceforge.net/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
@@ -4491,7 +4501,7 @@ S: Maintained
 F: arch/arm/*omap*/*clock*
 OMAP POWER MANAGEMENT SUPPORT
-M: Kevin Hilman <khilman@deeprootsystems.com>
+M: Kevin Hilman <khilman@ti.com>
 L: linux-omap@vger.kernel.org
 S: Maintained
 F: arch/arm/*omap*/*pm*
@@ -5171,6 +5181,7 @@ F: drivers/char/random.c
 RAPIDIO SUBSYSTEM
 M: Matt Porter <mporter@kernel.crashing.org>
+M: Alexandre Bounine <alexandre.bounine@idt.com>
 S: Maintained
 F: drivers/rapidio/


@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 38
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Flesh-Eating Bats with Fangs
 # *DOCUMENTATION*


@@ -11,6 +11,7 @@ config ALPHA
     select HAVE_GENERIC_HARDIRQS
     select GENERIC_IRQ_PROBE
     select AUTO_IRQ_AFFINITY if SMP
+    select GENERIC_HARDIRQS_NO_DEPRECATED
     help
       The Alpha is a 64-bit general-purpose processor designed and
       marketed by the Digital Equipment Corporation of blessed memory,


@@ -44,11 +44,16 @@ static char irq_user_affinity[NR_IRQS];
 int irq_select_affinity(unsigned int irq)
 {
-    struct irq_desc *desc = irq_to_desc[irq];
+    struct irq_data *data = irq_get_irq_data(irq);
+    struct irq_chip *chip;
     static int last_cpu;
     int cpu = last_cpu + 1;
-    if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq])
+    if (!data)
+        return 1;
+    chip = irq_data_get_irq_chip(data);
+    if (!chip->irq_set_affinity || irq_user_affinity[irq])
         return 1;
     while (!cpu_possible(cpu) ||
@@ -56,8 +61,8 @@ int irq_select_affinity(unsigned int irq)
         cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
     last_cpu = cpu;
-    cpumask_copy(desc->affinity, cpumask_of(cpu));
-    get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu));
+    cpumask_copy(data->affinity, cpumask_of(cpu));
+    chip->irq_set_affinity(data, cpumask_of(cpu), false);
     return 0;
 }
 #endif /* CONFIG_SMP */


@@ -228,14 +228,9 @@ struct irqaction timer_irqaction = {
 void __init
 init_rtc_irq(void)
 {
-    struct irq_desc *desc = irq_to_desc(RTC_IRQ);
-    if (desc) {
-        desc->status |= IRQ_DISABLED;
-        set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
-                                      handle_simple_irq, "RTC");
-        setup_irq(RTC_IRQ, &timer_irqaction);
-    }
+    set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+                                  handle_simple_irq, "RTC");
+    setup_irq(RTC_IRQ, &timer_irqaction);
 }
 /* Dummy irqactions. */


@@ -33,10 +33,10 @@ i8259_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 inline void
-i8259a_enable_irq(unsigned int irq)
+i8259a_enable_irq(struct irq_data *d)
 {
     spin_lock(&i8259_irq_lock);
-    i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+    i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
     spin_unlock(&i8259_irq_lock);
 }
@@ -47,16 +47,18 @@ __i8259a_disable_irq(unsigned int irq)
 }
 void
-i8259a_disable_irq(unsigned int irq)
+i8259a_disable_irq(struct irq_data *d)
 {
     spin_lock(&i8259_irq_lock);
-    __i8259a_disable_irq(irq);
+    __i8259a_disable_irq(d->irq);
     spin_unlock(&i8259_irq_lock);
 }
 void
-i8259a_mask_and_ack_irq(unsigned int irq)
+i8259a_mask_and_ack_irq(struct irq_data *d)
 {
+    unsigned int irq = d->irq;
     spin_lock(&i8259_irq_lock);
     __i8259a_disable_irq(irq);
@@ -71,9 +73,9 @@ i8259a_mask_and_ack_irq(unsigned int irq)
 struct irq_chip i8259a_irq_type = {
     .name = "XT-PIC",
-    .unmask = i8259a_enable_irq,
-    .mask = i8259a_disable_irq,
-    .mask_ack = i8259a_mask_and_ack_irq,
+    .irq_unmask = i8259a_enable_irq,
+    .irq_mask = i8259a_disable_irq,
+    .irq_mask_ack = i8259a_mask_and_ack_irq,
 };
 void __init


@@ -31,11 +31,9 @@ extern void init_rtc_irq(void);
 extern void common_init_isa_dma(void);
-extern void i8259a_enable_irq(unsigned int);
-extern void i8259a_disable_irq(unsigned int);
-extern void i8259a_mask_and_ack_irq(unsigned int);
-extern unsigned int i8259a_startup_irq(unsigned int);
-extern void i8259a_end_irq(unsigned int);
+extern void i8259a_enable_irq(struct irq_data *d);
+extern void i8259a_disable_irq(struct irq_data *d);
+extern void i8259a_mask_and_ack_irq(struct irq_data *d);
 extern struct irq_chip i8259a_irq_type;
 extern void init_i8259a_irqs(void);


@@ -29,21 +29,21 @@ pyxis_update_irq_hw(unsigned long mask)
 }
 static inline void
-pyxis_enable_irq(unsigned int irq)
+pyxis_enable_irq(struct irq_data *d)
 {
-    pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+    pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 static void
-pyxis_disable_irq(unsigned int irq)
+pyxis_disable_irq(struct irq_data *d)
 {
-    pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+    pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 static void
-pyxis_mask_and_ack_irq(unsigned int irq)
+pyxis_mask_and_ack_irq(struct irq_data *d)
 {
-    unsigned long bit = 1UL << (irq - 16);
+    unsigned long bit = 1UL << (d->irq - 16);
     unsigned long mask = cached_irq_mask &= ~bit;
     /* Disable the interrupt. */
@@ -58,9 +58,9 @@ pyxis_mask_and_ack_irq(unsigned int irq)
 static struct irq_chip pyxis_irq_type = {
     .name = "PYXIS",
-    .mask_ack = pyxis_mask_and_ack_irq,
-    .mask = pyxis_disable_irq,
-    .unmask = pyxis_enable_irq,
+    .irq_mask_ack = pyxis_mask_and_ack_irq,
+    .irq_mask = pyxis_disable_irq,
+    .irq_unmask = pyxis_enable_irq,
 };
 void
@@ -103,7 +103,7 @@ init_pyxis_irqs(unsigned long ignore_mask)
         if ((ignore_mask >> i) & 1)
             continue;
         set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
-        irq_to_desc(i)->status |= IRQ_LEVEL;
+        irq_set_status_flags(i, IRQ_LEVEL);
     }
     setup_irq(16+7, &isa_cascade_irqaction);


@@ -18,27 +18,27 @@
 DEFINE_SPINLOCK(srm_irq_lock);
 static inline void
-srm_enable_irq(unsigned int irq)
+srm_enable_irq(struct irq_data *d)
 {
     spin_lock(&srm_irq_lock);
-    cserve_ena(irq - 16);
+    cserve_ena(d->irq - 16);
     spin_unlock(&srm_irq_lock);
 }
 static void
-srm_disable_irq(unsigned int irq)
+srm_disable_irq(struct irq_data *d)
 {
     spin_lock(&srm_irq_lock);
-    cserve_dis(irq - 16);
+    cserve_dis(d->irq - 16);
     spin_unlock(&srm_irq_lock);
 }
 /* Handle interrupts from the SRM, assuming no additional weirdness. */
 static struct irq_chip srm_irq_type = {
     .name = "SRM",
-    .unmask = srm_enable_irq,
-    .mask = srm_disable_irq,
-    .mask_ack = srm_disable_irq,
+    .irq_unmask = srm_enable_irq,
+    .irq_mask = srm_disable_irq,
+    .irq_mask_ack = srm_disable_irq,
 };
 void __init
@@ -52,7 +52,7 @@ init_srm_irqs(long max, unsigned long ignore_mask)
         if (i < 64 && ((ignore_mask >> i) & 1))
             continue;
         set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
-        irq_to_desc(i)->status |= IRQ_LEVEL;
+        irq_set_status_flags(i, IRQ_LEVEL);
     }
 }


@@ -44,31 +44,31 @@ alcor_update_irq_hw(unsigned long mask)
 }
 static inline void
-alcor_enable_irq(unsigned int irq)
+alcor_enable_irq(struct irq_data *d)
 {
-    alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+    alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 static void
-alcor_disable_irq(unsigned int irq)
+alcor_disable_irq(struct irq_data *d)
 {
-    alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+    alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 static void
-alcor_mask_and_ack_irq(unsigned int irq)
+alcor_mask_and_ack_irq(struct irq_data *d)
 {
-    alcor_disable_irq(irq);
+    alcor_disable_irq(d);
     /* On ALCOR/XLT, need to dismiss interrupt via GRU. */
-    *(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb();
+    *(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
     *(vuip)GRU_INT_CLEAR = 0; mb();
 }
 static void
-alcor_isa_mask_and_ack_irq(unsigned int irq)
+alcor_isa_mask_and_ack_irq(struct irq_data *d)
 {
-    i8259a_mask_and_ack_irq(irq);
+    i8259a_mask_and_ack_irq(d);
     /* On ALCOR/XLT, need to dismiss interrupt via GRU. */
     *(vuip)GRU_INT_CLEAR = 0x80000000; mb();
@@ -77,9 +77,9 @@ alcor_isa_mask_and_ack_irq(unsigned int irq)
 static struct irq_chip alcor_irq_type = {
     .name = "ALCOR",
-    .unmask = alcor_enable_irq,
-    .mask = alcor_disable_irq,
-    .mask_ack = alcor_mask_and_ack_irq,
+    .irq_unmask = alcor_enable_irq,
+    .irq_mask = alcor_disable_irq,
+    .irq_mask_ack = alcor_mask_and_ack_irq,
 };
 static void
@@ -126,9 +126,9 @@ alcor_init_irq(void)
         if (i >= 16+20 && i <= 16+30)
             continue;
         set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
-        irq_to_desc(i)->status |= IRQ_LEVEL;
+        irq_set_status_flags(i, IRQ_LEVEL);
     }
-    i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq;
+    i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
     init_i8259a_irqs();
     common_init_isa_dma();


@@ -46,22 +46,22 @@ cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 static inline void
-cabriolet_enable_irq(unsigned int irq)
+cabriolet_enable_irq(struct irq_data *d)
 {
-    cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq));
+    cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq));
 }
 static void
-cabriolet_disable_irq(unsigned int irq)
+cabriolet_disable_irq(struct irq_data *d)
 {
-    cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq);
+    cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq);
 }
 static struct irq_chip cabriolet_irq_type = {
     .name = "CABRIOLET",
-    .unmask = cabriolet_enable_irq,
-    .mask = cabriolet_disable_irq,
-    .mask_ack = cabriolet_disable_irq,
+    .irq_unmask = cabriolet_enable_irq,
+    .irq_mask = cabriolet_disable_irq,
+    .irq_mask_ack = cabriolet_disable_irq,
 };
 static void
@@ -107,7 +107,7 @@ common_init_irq(void (*srm_dev_int)(unsigned long v))
         for (i = 16; i < 35; ++i) {
             set_irq_chip_and_handler(i, &cabriolet_irq_type,
                                      handle_level_irq);
-            irq_to_desc(i)->status |= IRQ_LEVEL;
+            irq_set_status_flags(i, IRQ_LEVEL);
         }
     }


@@ -98,37 +98,37 @@ tsunami_update_irq_hw(unsigned long mask)
 }
 static void
-dp264_enable_irq(unsigned int irq)
+dp264_enable_irq(struct irq_data *d)
 {
     spin_lock(&dp264_irq_lock);
-    cached_irq_mask |= 1UL << irq;
+    cached_irq_mask |= 1UL << d->irq;
     tsunami_update_irq_hw(cached_irq_mask);
     spin_unlock(&dp264_irq_lock);
 }
 static void
-dp264_disable_irq(unsigned int irq)
+dp264_disable_irq(struct irq_data *d)
 {
     spin_lock(&dp264_irq_lock);
-    cached_irq_mask &= ~(1UL << irq);
+    cached_irq_mask &= ~(1UL << d->irq);
     tsunami_update_irq_hw(cached_irq_mask);
     spin_unlock(&dp264_irq_lock);
 }
 static void
-clipper_enable_irq(unsigned int irq)
+clipper_enable_irq(struct irq_data *d)
 {
     spin_lock(&dp264_irq_lock);
-    cached_irq_mask |= 1UL << (irq - 16);
+    cached_irq_mask |= 1UL << (d->irq - 16);
     tsunami_update_irq_hw(cached_irq_mask);
     spin_unlock(&dp264_irq_lock);
 }
 static void
-clipper_disable_irq(unsigned int irq)
+clipper_disable_irq(struct irq_data *d)
 {
     spin_lock(&dp264_irq_lock);
-    cached_irq_mask &= ~(1UL << (irq - 16));
+    cached_irq_mask &= ~(1UL << (d->irq - 16));
     tsunami_update_irq_hw(cached_irq_mask);
     spin_unlock(&dp264_irq_lock);
 }
@@ -149,10 +149,11 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 static int
-dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
-{
+dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
+                   bool force)
+{
     spin_lock(&dp264_irq_lock);
-    cpu_set_irq_affinity(irq, *affinity);
+    cpu_set_irq_affinity(d->irq, *affinity);
     tsunami_update_irq_hw(cached_irq_mask);
     spin_unlock(&dp264_irq_lock);
@@ -160,10 +161,11 @@ dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
 }
 static int
-clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
-{
+clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
+                     bool force)
+{
     spin_lock(&dp264_irq_lock);
-    cpu_set_irq_affinity(irq - 16, *affinity);
+    cpu_set_irq_affinity(d->irq - 16, *affinity);
     tsunami_update_irq_hw(cached_irq_mask);
     spin_unlock(&dp264_irq_lock);
@@ -171,19 +173,19 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
 }
 static struct irq_chip dp264_irq_type = {
     .name = "DP264",
-    .unmask = dp264_enable_irq,
-    .mask = dp264_disable_irq,
-    .mask_ack = dp264_disable_irq,
-    .set_affinity = dp264_set_affinity,
+    .irq_unmask = dp264_enable_irq,
+    .irq_mask = dp264_disable_irq,
+    .irq_mask_ack = dp264_disable_irq,
+    .irq_set_affinity = dp264_set_affinity,
 };
 static struct irq_chip clipper_irq_type = {
     .name = "CLIPPER",
-    .unmask = clipper_enable_irq,
-    .mask = clipper_disable_irq,
-    .mask_ack = clipper_disable_irq,
-    .set_affinity = clipper_set_affinity,
+    .irq_unmask = clipper_enable_irq,
+    .irq_mask = clipper_disable_irq,
+    .irq_mask_ack = clipper_disable_irq,
+    .irq_set_affinity = clipper_set_affinity,
 };
 static void
@@ -268,8 +270,8 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
 {
     long i;
     for (i = imin; i <= imax; ++i) {
-        irq_to_desc(i)->status |= IRQ_LEVEL;
         set_irq_chip_and_handler(i, ops, handle_level_irq);
+        irq_set_status_flags(i, IRQ_LEVEL);
     }
 }


@@ -44,22 +44,22 @@ eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 static inline void
-eb64p_enable_irq(unsigned int irq)
+eb64p_enable_irq(struct irq_data *d)
 {
-    eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+    eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
 }
 static void
-eb64p_disable_irq(unsigned int irq)
+eb64p_disable_irq(struct irq_data *d)
 {
-    eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
+    eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq);
 }
 static struct irq_chip eb64p_irq_type = {
     .name = "EB64P",
-    .unmask = eb64p_enable_irq,
-    .mask = eb64p_disable_irq,
-    .mask_ack = eb64p_disable_irq,
+    .irq_unmask = eb64p_enable_irq,
+    .irq_mask = eb64p_disable_irq,
+    .irq_mask_ack = eb64p_disable_irq,
 };
 static void
@@ -118,9 +118,9 @@ eb64p_init_irq(void)
     init_i8259a_irqs();
     for (i = 16; i < 32; ++i) {
-        irq_to_desc(i)->status |= IRQ_LEVEL;
         set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
-    }
+        irq_set_status_flags(i, IRQ_LEVEL);
+    }
     common_init_isa_dma();
     setup_irq(16+5, &isa_cascade_irqaction);


@@ -51,16 +51,18 @@ eiger_update_irq_hw(unsigned long irq, unsigned long mask)
 }
 static inline void
-eiger_enable_irq(unsigned int irq)
+eiger_enable_irq(struct irq_data *d)
 {
+    unsigned int irq = d->irq;
     unsigned long mask;
     mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
     eiger_update_irq_hw(irq, mask);
 }
 static void
-eiger_disable_irq(unsigned int irq)
+eiger_disable_irq(struct irq_data *d)
 {
+    unsigned int irq = d->irq;
     unsigned long mask;
     mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
     eiger_update_irq_hw(irq, mask);
@@ -68,9 +70,9 @@ eiger_disable_irq(unsigned int irq)
 static struct irq_chip eiger_irq_type = {
     .name = "EIGER",
-    .unmask = eiger_enable_irq,
-    .mask = eiger_disable_irq,
-    .mask_ack = eiger_disable_irq,
+    .irq_unmask = eiger_enable_irq,
+    .irq_mask = eiger_disable_irq,
+    .irq_mask_ack = eiger_disable_irq,
 };
 static void
@@ -136,8 +138,8 @@ eiger_init_irq(void)
     init_i8259a_irqs();
     for (i = 16; i < 128; ++i) {
-        irq_to_desc(i)->status |= IRQ_LEVEL;
         set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
+        irq_set_status_flags(i, IRQ_LEVEL);
     }
 }


@@ -63,34 +63,34 @@
  */
 static void
-jensen_local_enable(unsigned int irq)
+jensen_local_enable(struct irq_data *d)
 {
     /* the parport is really hw IRQ 1, silly Jensen. */
-    if (irq == 7)
-        i8259a_enable_irq(1);
+    if (d->irq == 7)
+        i8259a_enable_irq(d);
 }
 static void
-jensen_local_disable(unsigned int irq)
+jensen_local_disable(struct irq_data *d)
 {
     /* the parport is really hw IRQ 1, silly Jensen. */
-    if (irq == 7)
-        i8259a_disable_irq(1);
+    if (d->irq == 7)
+        i8259a_disable_irq(d);
 }
 static void
-jensen_local_mask_ack(unsigned int irq)
+jensen_local_mask_ack(struct irq_data *d)
 {
     /* the parport is really hw IRQ 1, silly Jensen. */
-    if (irq == 7)
-        i8259a_mask_and_ack_irq(1);
+    if (d->irq == 7)
+        i8259a_mask_and_ack_irq(d);
 }
 static struct irq_chip jensen_local_irq_type = {
     .name = "LOCAL",
-    .unmask = jensen_local_enable,
-    .mask = jensen_local_disable,
-    .mask_ack = jensen_local_mask_ack,
+    .irq_unmask = jensen_local_enable,
+    .irq_mask = jensen_local_disable,
+    .irq_mask_ack = jensen_local_mask_ack,
 };
 static void


@@ -104,9 +104,10 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
 }
 static void
-io7_enable_irq(unsigned int irq)
+io7_enable_irq(struct irq_data *d)
 {
     volatile unsigned long *ctl;
+    unsigned int irq = d->irq;
     struct io7 *io7;
     ctl = io7_get_irq_ctl(irq, &io7);
@@ -115,7 +116,7 @@ io7_enable_irq(unsigned int irq)
                __func__, irq);
         return;
     }
     spin_lock(&io7->irq_lock);
     *ctl |= 1UL << 24;
     mb();
@@ -124,9 +125,10 @@ io7_enable_irq(unsigned int irq)
 }
 static void
-io7_disable_irq(unsigned int irq)
+io7_disable_irq(struct irq_data *d)
 {
     volatile unsigned long *ctl;
+    unsigned int irq = d->irq;
     struct io7 *io7;
     ctl = io7_get_irq_ctl(irq, &io7);
@@ -135,7 +137,7 @@ io7_disable_irq(unsigned int irq)
                __func__, irq);
         return;
     }
     spin_lock(&io7->irq_lock);
     *ctl &= ~(1UL << 24);
     mb();
@@ -144,35 +146,29 @@ io7_disable_irq(unsigned int irq)
 }
 static void
-marvel_irq_noop(unsigned int irq)
+marvel_irq_noop(struct irq_data *d)
 {
     return;
 }
-static unsigned int
-marvel_irq_noop_return(unsigned int irq)
-{
-    return 0;
-}
 static struct irq_chip marvel_legacy_irq_type = {
     .name = "LEGACY",
-    .mask = marvel_irq_noop,
-    .unmask = marvel_irq_noop,
+    .irq_mask = marvel_irq_noop,
+    .irq_unmask = marvel_irq_noop,
 };
 static struct irq_chip io7_lsi_irq_type = {
     .name = "LSI",
-    .unmask = io7_enable_irq,
-    .mask = io7_disable_irq,
-    .mask_ack = io7_disable_irq,
+    .irq_unmask = io7_enable_irq,
+    .irq_mask = io7_disable_irq,
+    .irq_mask_ack = io7_disable_irq,
 };
 static struct irq_chip io7_msi_irq_type = {
     .name = "MSI",
-    .unmask = io7_enable_irq,
-    .mask = io7_disable_irq,
-    .ack = marvel_irq_noop,
+    .irq_unmask = io7_enable_irq,
+    .irq_mask = io7_disable_irq,
+    .irq_ack = marvel_irq_noop,
 };
 static void
@@ -280,8 +276,8 @@ init_io7_irqs(struct io7 *io7,
     /* Set up the lsi irqs. */
     for (i = 0; i < 128; ++i) {
-        irq_to_desc(base + i)->status |= IRQ_LEVEL;
         set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
+        irq_set_status_flags(i, IRQ_LEVEL);
     }
     /* Disable the implemented irqs in hardware. */
@@ -294,8 +290,8 @@ init_io7_irqs(struct io7 *io7,
     /* Set up the msi irqs. */
     for (i = 128; i < (128 + 512); ++i) {
-        irq_to_desc(base + i)->status |= IRQ_LEVEL;
         set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
+        irq_set_status_flags(i, IRQ_LEVEL);
     }
     for (i = 0; i < 16; ++i)


@@ -43,22 +43,22 @@ mikasa_update_irq_hw(int mask)
 }
 static inline void
-mikasa_enable_irq(unsigned int irq)
+mikasa_enable_irq(struct irq_data *d)
 {
-    mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16));
+    mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16));
 }
 static void
-mikasa_disable_irq(unsigned int irq)
+mikasa_disable_irq(struct irq_data *d)
 {
-    mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16)));
+    mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16)));
 }
 static struct irq_chip mikasa_irq_type = {
     .name = "MIKASA",
-    .unmask = mikasa_enable_irq,
-    .mask = mikasa_disable_irq,
-    .mask_ack = mikasa_disable_irq,
+    .irq_unmask = mikasa_enable_irq,
+    .irq_mask = mikasa_disable_irq,
+    .irq_mask_ack = mikasa_disable_irq,
 };
 static void
@@ -98,8 +98,8 @@ mikasa_init_irq(void)
     mikasa_update_irq_hw(0);
     for (i = 16; i < 32; ++i) {
-        irq_to_desc(i)->status |= IRQ_LEVEL;
         set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
+        irq_set_status_flags(i, IRQ_LEVEL);
     }
     init_i8259a_irqs();


@@ -48,22 +48,22 @@ noritake_update_irq_hw(int irq, int mask)
 }
 static void
-noritake_enable_irq(unsigned int irq)
+noritake_enable_irq(struct irq_data *d)
 {
-    noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16));
+    noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16));
 }
 static void
-noritake_disable_irq(unsigned int irq)
+noritake_disable_irq(struct irq_data *d)
 {
-    noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16)));
+    noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16)));
 }
 static struct irq_chip noritake_irq_type = {
     .name = "NORITAKE",
-    .unmask = noritake_enable_irq,
-    .mask = noritake_disable_irq,
-    .mask_ack = noritake_disable_irq,
+    .irq_unmask = noritake_enable_irq,
+    .irq_mask = noritake_disable_irq,
+    .irq_mask_ack = noritake_disable_irq,
 };
 static void
@@ -127,8 +127,8 @@ noritake_init_irq(void)
     outw(0, 0x54c);
     for (i = 16; i < 48; ++i) {
-        irq_to_desc(i)->status |= IRQ_LEVEL;
         set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
+        irq_set_status_flags(i, IRQ_LEVEL);
     }
     init_i8259a_irqs();


@@ -56,9 +56,10 @@ rawhide_update_irq_hw(int hose, int mask)
     (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0))
 static inline void
-rawhide_enable_irq(unsigned int irq)
+rawhide_enable_irq(struct irq_data *d)
 {
     unsigned int mask, hose;
+    unsigned int irq = d->irq;
     irq -= 16;
     hose = irq / 24;
@@ -76,9 +77,10 @@ rawhide_enable_irq(unsigned int irq)
 }
 static void
-rawhide_disable_irq(unsigned int irq)
+rawhide_disable_irq(struct irq_data *d)
 {
     unsigned int mask, hose;
+    unsigned int irq = d->irq;
     irq -= 16;
     hose = irq / 24;
@@ -96,9 +98,10 @@ rawhide_disable_irq(unsigned int irq)
 }
 static void
-rawhide_mask_and_ack_irq(unsigned int irq)
+rawhide_mask_and_ack_irq(struct irq_data *d)
 {
     unsigned int mask, mask1, hose;
+    unsigned int irq = d->irq;
     irq -= 16;
     hose = irq / 24;
@@ -123,9 +126,9 @@ rawhide_mask_and_ack_irq(unsigned int irq)
 static struct irq_chip rawhide_irq_type = {
     .name = "RAWHIDE",
-    .unmask = rawhide_enable_irq,
-    .mask = rawhide_disable_irq,
-    .mask_ack = rawhide_mask_and_ack_irq,
+    .irq_unmask = rawhide_enable_irq,
+    .irq_mask = rawhide_disable_irq,
+    .irq_mask_ack = rawhide_mask_and_ack_irq,
 };
 static void
@@ -177,8 +180,8 @@ rawhide_init_irq(void)
     }
     for (i = 16; i < 128; ++i) {
-        irq_to_desc(i)->status |= IRQ_LEVEL;
         set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
+        irq_set_status_flags(i, IRQ_LEVEL);
     }
     init_i8259a_irqs();


@@ -47,22 +47,22 @@ rx164_update_irq_hw(unsigned long mask)
 }
 static inline void
-rx164_enable_irq(unsigned int irq)
+rx164_enable_irq(struct irq_data *d)
 {
-    rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+    rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 static void
-rx164_disable_irq(unsigned int irq)
+rx164_disable_irq(struct irq_data *d)
 {
-    rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+    rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 static struct irq_chip rx164_irq_type = {
     .name = "RX164",
-    .unmask = rx164_enable_irq,
-    .mask = rx164_disable_irq,
-    .mask_ack = rx164_disable_irq,
+    .irq_unmask = rx164_enable_irq,
+    .irq_mask = rx164_disable_irq,
+    .irq_mask_ack = rx164_disable_irq,
 };
 static void
@@ -99,8 +99,8 @@ rx164_init_irq(void)
     rx164_update_irq_hw(0);
     for (i = 16; i < 40; ++i) {
-        irq_to_desc(i)->status |= IRQ_LEVEL;
         set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
+        irq_set_status_flags(i, IRQ_LEVEL);
     }
     init_i8259a_irqs();


@@ -443,11 +443,11 @@ lynx_swizzle(struct pci_dev *dev, u8 *pinp)
 /* GENERIC irq routines */
 static inline void
-sable_lynx_enable_irq(unsigned int irq)
+sable_lynx_enable_irq(struct irq_data *d)
 {
     unsigned long bit, mask;
-    bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+    bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
     spin_lock(&sable_lynx_irq_lock);
     mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
     sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -459,11 +459,11 @@ sable_lynx_enable_irq(unsigned int irq)
 }
 static void
-sable_lynx_disable_irq(unsigned int irq)
+sable_lynx_disable_irq(struct irq_data *d)
 {
     unsigned long bit, mask;
-    bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+    bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
     spin_lock(&sable_lynx_irq_lock);
     mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
     sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -475,11 +475,11 @@ sable_lynx_disable_irq(unsigned int irq)
 }
 static void
-sable_lynx_mask_and_ack_irq(unsigned int irq)
+sable_lynx_mask_and_ack_irq(struct irq_data *d)
 {
     unsigned long bit, mask;
-    bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+    bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
     spin_lock(&sable_lynx_irq_lock);
     mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
     sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -489,9 +489,9 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
 static struct irq_chip sable_lynx_irq_type = {
     .name = "SABLE/LYNX",
-    .unmask = sable_lynx_enable_irq,
-    .mask = sable_lynx_disable_irq,
-    .mask_ack = sable_lynx_mask_and_ack_irq,
+    .irq_unmask = sable_lynx_enable_irq,
+    .irq_mask = sable_lynx_disable_irq,
+    .irq_mask_ack = sable_lynx_mask_and_ack_irq,
 };
 static void
@@ -518,9 +518,9 @@ sable_lynx_init_irq(int nr_of_irqs)
     long i;
     for (i = 0; i < nr_of_irqs; ++i) {
-        irq_to_desc(i)->status |= IRQ_LEVEL;
         set_irq_chip_and_handler(i, &sable_lynx_irq_type,
                                  handle_level_irq);
+        irq_set_status_flags(i, IRQ_LEVEL);
     }
     common_init_isa_dma();


@@ -45,16 +45,18 @@ takara_update_irq_hw(unsigned long irq, unsigned long mask)
 }
 static inline void
-takara_enable_irq(unsigned int irq)
+takara_enable_irq(struct irq_data *d)
 {
+    unsigned int irq = d->irq;
     unsigned long mask;
     mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
     takara_update_irq_hw(irq, mask);
 }
 static void
-takara_disable_irq(unsigned int irq)
+takara_disable_irq(struct irq_data *d)
 {
+    unsigned int irq = d->irq;
     unsigned long mask;
     mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
     takara_update_irq_hw(irq, mask);
@@ -62,9 +64,9 @@ takara_disable_irq(unsigned int irq)
 static struct irq_chip takara_irq_type = {
     .name = "TAKARA",
-    .unmask = takara_enable_irq,
-    .mask = takara_disable_irq,
-    .mask_ack = takara_disable_irq,
+    .irq_unmask = takara_enable_irq,
+    .irq_mask = takara_disable_irq,
+    .irq_mask_ack = takara_disable_irq,
 };
 static void
@@ -136,8 +138,8 @@ takara_init_irq(void)
         takara_update_irq_hw(i, -1);
     for (i = 16; i < 128; ++i) {
-        irq_to_desc(i)->status |= IRQ_LEVEL;
         set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
+        irq_set_status_flags(i, IRQ_LEVEL);
     }
     common_init_isa_dma();


@@ -112,8 +112,9 @@ titan_update_irq_hw(unsigned long mask)
 }
 static inline void
-titan_enable_irq(unsigned int irq)
+titan_enable_irq(struct irq_data *d)
 {
+    unsigned int irq = d->irq;
     spin_lock(&titan_irq_lock);
     titan_cached_irq_mask |= 1UL << (irq - 16);
     titan_update_irq_hw(titan_cached_irq_mask);
@@ -121,8 +122,9 @@ titan_enable_irq(unsigned int irq)
 }
 static inline void
-titan_disable_irq(unsigned int irq)
+titan_disable_irq(struct irq_data *d)
 {
+    unsigned int irq = d->irq;
     spin_lock(&titan_irq_lock);
     titan_cached_irq_mask &= ~(1UL << (irq - 16));
     titan_update_irq_hw(titan_cached_irq_mask);
@@ -144,7 +146,8 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 static int
-titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
+titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
+                       bool force)
 {
     spin_lock(&titan_irq_lock);
     titan_cpu_set_irq_affinity(irq - 16, *affinity);
@@ -175,17 +178,17 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax)
 {
     long i;
     for (i = imin; i <= imax; ++i) {
-        irq_to_desc(i)->status |= IRQ_LEVEL;
         set_irq_chip_and_handler(i, ops, handle_level_irq);
+        irq_set_status_flags(i, IRQ_LEVEL);
     }
 }
 static struct irq_chip titan_irq_type = {
     .name = "TITAN",
-    .unmask = titan_enable_irq,
-    .mask = titan_disable_irq,
-    .mask_ack = titan_disable_irq,
-    .set_affinity = titan_set_irq_affinity,
+    .irq_unmask = titan_enable_irq,
+    .irq_mask = titan_disable_irq,
+    .irq_mask_ack = titan_disable_irq,
+    .irq_set_affinity = titan_set_irq_affinity,
 };
 static irqreturn_t


@@ -104,10 +104,12 @@ wildfire_init_irq_hw(void)
 }
 static void
-wildfire_enable_irq(unsigned int irq)
+wildfire_enable_irq(struct irq_data *d)
 {
+    unsigned int irq = d->irq;
     if (irq < 16)
-        i8259a_enable_irq(irq);
+        i8259a_enable_irq(d);
     spin_lock(&wildfire_irq_lock);
     set_bit(irq, &cached_irq_mask);
@@ -116,10 +118,12 @@ wildfire_enable_irq(unsigned int irq)
 }
 static void
-wildfire_disable_irq(unsigned int irq)
+wildfire_disable_irq(struct irq_data *d)
 {
+    unsigned int irq = d->irq;
     if (irq < 16)
-        i8259a_disable_irq(irq);
+        i8259a_disable_irq(d);
     spin_lock(&wildfire_irq_lock);
     clear_bit(irq, &cached_irq_mask);
@@ -128,10 +132,12 @@ wildfire_disable_irq(unsigned int irq)
 }
 static void
-wildfire_mask_and_ack_irq(unsigned int irq)
+wildfire_mask_and_ack_irq(struct irq_data *d)
 {
+    unsigned int irq = d->irq;
     if (irq < 16)
-        i8259a_mask_and_ack_irq(irq);
+        i8259a_mask_and_ack_irq(d);
     spin_lock(&wildfire_irq_lock);
     clear_bit(irq, &cached_irq_mask);
@@ -141,9 +147,9 @@ wildfire_mask_and_ack_irq(unsigned int irq)
 static struct irq_chip wildfire_irq_type = {
     .name = "WILDFIRE",
-    .unmask = wildfire_enable_irq,
-    .mask = wildfire_disable_irq,
-    .mask_ack = wildfire_mask_and_ack_irq,
+    .irq_unmask = wildfire_enable_irq,
+    .irq_mask = wildfire_disable_irq,
+    .irq_mask_ack = wildfire_mask_and_ack_irq,
 };
 static void __init
@@ -177,21 +183,21 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
     for (i = 0; i < 16; ++i) {
         if (i == 2)
             continue;
-        irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
         set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
                                  handle_level_irq);
+        irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
     }
-    irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL;
     set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
                              handle_level_irq);
+    irq_set_status_flags(36 + irq_bias, IRQ_LEVEL);
     for (i = 40; i < 64; ++i) {
-        irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
         set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
                                  handle_level_irq);
+        irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
     }
     setup_irq(32+irq_bias, &isa_enable);
 }
 static void __init


@@ -6,6 +6,8 @@ config ARM_VIC
 config ARM_VIC_NR
     int
+    default 4 if ARCH_S5PV210
+    default 3 if ARCH_S5P6442 || ARCH_S5PC100
     default 2
     depends on ARM_VIC
     help


@@ -15,10 +15,6 @@ struct meminfo;
 struct sys_timer;
 struct machine_desc {
-    /*
-     * Note! The first two elements are used
-     * by assembler code in head.S, head-common.S
-     */
     unsigned int nr; /* architecture number */
     const char *name; /* architecture name */
     unsigned long boot_params; /* tagged list */


@@ -10,6 +10,8 @@
 #ifndef _ASMARM_PGALLOC_H
 #define _ASMARM_PGALLOC_H
+#include <linux/pagemap.h>
 #include <asm/domain.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/processor.h>


@@ -836,9 +836,11 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 /*
  * One-time initialisation.
  */
-static void reset_ctrl_regs(void *unused)
+static void reset_ctrl_regs(void *info)
 {
-    int i;
+    int i, cpu = smp_processor_id();
+    u32 dbg_power;
+    cpumask_t *cpumask = info;
     /*
      * v7 debug contains save and restore registers so that debug state
@@ -849,6 +851,17 @@ static void reset_ctrl_regs(void *unused)
      * later on.
      */
     if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
+        /*
+         * Ensure sticky power-down is clear (i.e. debug logic is
+         * powered up).
+         */
+        asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
+        if ((dbg_power & 0x1) == 0) {
+            pr_warning("CPU %d debug is powered down!\n", cpu);
+            cpumask_or(cpumask, cpumask, cpumask_of(cpu));
+            return;
+        }
         /*
          * Unconditionally clear the lock by writing a value
          * other than 0xC5ACCE55 to the access register.
@@ -887,6 +900,7 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
 static int __init arch_hw_breakpoint_init(void)
 {
     u32 dscr;
+    cpumask_t cpumask = { CPU_BITS_NONE };
     debug_arch = get_debug_arch();
@@ -911,7 +925,13 @@ static int __init arch_hw_breakpoint_init(void)
      * Reset the breakpoint resources. We assume that a halting
      * debugger will leave the world in a nice state for us.
      */
-    on_each_cpu(reset_ctrl_regs, NULL, 1);
+    on_each_cpu(reset_ctrl_regs, &cpumask, 1);
+    if (!cpumask_empty(&cpumask)) {
+        core_num_brps = 0;
+        core_num_reserved_brps = 0;
+        core_num_wrps = 0;
+        return 0;
+    }
     ARM_DBG_READ(c1, 0, dscr);
     if (dscr & ARM_DSCR_HDBGEN) {


@@ -996,10 +996,10 @@ static int ptrace_gethbpregs(struct task_struct *tsk, long num,
         while (!(arch_ctrl.len & 0x1))
             arch_ctrl.len >>= 1;
-        if (idx & 0x1)
-            reg = encode_ctrl_reg(arch_ctrl);
-        else
+        if (num & 0x1)
             reg = bp->attr.bp_addr;
+        else
+            reg = encode_ctrl_reg(arch_ctrl);
     }
 put:


@@ -132,7 +132,7 @@ out:
     return ret;
 }
-static int __init davinci_cpu_init(struct cpufreq_policy *policy)
+static int davinci_cpu_init(struct cpufreq_policy *policy)
 {
     int result = 0;
     struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;


@@ -480,8 +480,15 @@ static struct platform_device da850_mcasp_device = {
     .resource = da850_mcasp_resources,
 };
+struct platform_device davinci_pcm_device = {
+    .name = "davinci-pcm-audio",
+    .id = -1,
+};
 void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata)
 {
+    platform_device_register(&davinci_pcm_device);
     /* DA830/OMAP-L137 has 3 instances of McASP */
     if (cpu_is_davinci_da830() && id == 1) {
         da830_mcasp1_device.dev.platform_data = pdata;


@@ -58,7 +58,7 @@ static int tnetv107x_gpio_request(struct gpio_chip *chip, unsigned offset)
     spin_lock_irqsave(&ctlr->lock, flags);
-    gpio_reg_set_bit(&regs->enable, gpio);
+    gpio_reg_set_bit(regs->enable, gpio);
     spin_unlock_irqrestore(&ctlr->lock, flags);
@@ -74,7 +74,7 @@ static void tnetv107x_gpio_free(struct gpio_chip *chip, unsigned offset)
     spin_lock_irqsave(&ctlr->lock, flags);
-    gpio_reg_clear_bit(&regs->enable, gpio);
+    gpio_reg_clear_bit(regs->enable, gpio);
     spin_unlock_irqrestore(&ctlr->lock, flags);
 }
@@ -88,7 +88,7 @@ static int tnetv107x_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
     spin_lock_irqsave(&ctlr->lock, flags);
-    gpio_reg_set_bit(&regs->direction, gpio);
+    gpio_reg_set_bit(regs->direction, gpio);
     spin_unlock_irqrestore(&ctlr->lock, flags);
@@ -106,11 +106,11 @@ static int tnetv107x_gpio_dir_out(struct gpio_chip *chip,
     spin_lock_irqsave(&ctlr->lock, flags);
     if (value)
-        gpio_reg_set_bit(&regs->data_out, gpio);
+        gpio_reg_set_bit(regs->data_out, gpio);
     else
-        gpio_reg_clear_bit(&regs->data_out, gpio);
-    gpio_reg_clear_bit(&regs->direction, gpio);
+        gpio_reg_clear_bit(regs->data_out, gpio);
+    gpio_reg_clear_bit(regs->direction, gpio);
     spin_unlock_irqrestore(&ctlr->lock, flags);
@@ -124,7 +124,7 @@ static int tnetv107x_gpio_get(struct gpio_chip *chip, unsigned offset)
     unsigned gpio = chip->base + offset;
     int ret;
-    ret = gpio_reg_get_bit(&regs->data_in, gpio);
+    ret = gpio_reg_get_bit(regs->data_in, gpio);
     return ret ? 1 : 0;
 }
@@ -140,9 +140,9 @@ static void tnetv107x_gpio_set(struct gpio_chip *chip,
     spin_lock_irqsave(&ctlr->lock, flags);
     if (value)
-        gpio_reg_set_bit(&regs->data_out, gpio);
+        gpio_reg_set_bit(regs->data_out, gpio);
     else
-        gpio_reg_clear_bit(&regs->data_out, gpio);
+        gpio_reg_clear_bit(regs->data_out, gpio);
     spin_unlock_irqrestore(&ctlr->lock, flags);
 }


@@ -1,6 +1,8 @@
 #ifndef __MACH_CLKDEV_H
 #define __MACH_CLKDEV_H
+struct clk;
 static inline int __clk_get(struct clk *clk)
 {
     return 1;


@@ -23,6 +23,7 @@
 #include <linux/gpio.h>
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
+#include <linux/opp.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
@@ -45,10 +46,12 @@
 #include <plat/gpmc.h>
 #include <plat/nand.h>
 #include <plat/usb.h>
+#include <plat/omap_device.h>
 #include "mux.h"
 #include "hsmmc.h"
 #include "timer-gp.h"
+#include "pm.h"
 #define NAND_BLOCK_SIZE SZ_128K
@@ -603,6 +606,52 @@ static struct omap_musb_board_data musb_board_data = {
     .power = 100,
 };
+static void __init beagle_opp_init(void)
+{
+    int r = 0;
+    /* Initialize the omap3 opp table */
+    if (omap3_opp_init()) {
+        pr_err("%s: opp default init failed\n", __func__);
+        return;
+    }
+    /* Custom OPP enabled for XM */
+    if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) {
+        struct omap_hwmod *mh = omap_hwmod_lookup("mpu");
+        struct omap_hwmod *dh = omap_hwmod_lookup("iva");
+        struct device *dev;
+        if (!mh || !dh) {
+            pr_err("%s: Aiee.. no mpu/dsp devices? %p %p\n",
+                   __func__, mh, dh);
+            return;
+        }
+        /* Enable MPU 1GHz and lower opps */
+        dev = &mh->od->pdev.dev;
+        r = opp_enable(dev, 800000000);
+        /* TODO: MPU 1GHz needs SR and ABB */
+        /* Enable IVA 800MHz and lower opps */
+        dev = &dh->od->pdev.dev;
+        r |= opp_enable(dev, 660000000);
+        /* TODO: DSP 800MHz needs SR and ABB */
+        if (r) {
+            pr_err("%s: failed to enable higher opp %d\n",
+                   __func__, r);
+            /*
+             * Cleanup - disable the higher freqs - we dont care
+             * about the results
+             */
+            dev = &mh->od->pdev.dev;
+            opp_disable(dev, 800000000);
+            dev = &dh->od->pdev.dev;
+            opp_disable(dev, 660000000);
+        }
+    }
+    return;
+}
 static void __init omap3_beagle_init(void)
 {
     omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
@@ -627,6 +676,7 @@ static void __init omap3_beagle_init(void)
     omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
     beagle_display_init();
+    beagle_opp_init();
 }
 MACHINE_START(OMAP3_BEAGLE, "OMAP3 Beagle Board")


@@ -50,9 +50,6 @@ static struct omap_globals omap242x_globals = {
     .ctrl = OMAP242X_CTRL_BASE,
     .prm = OMAP2420_PRM_BASE,
     .cm = OMAP2420_CM_BASE,
-    .uart1_phys = OMAP2_UART1_BASE,
-    .uart2_phys = OMAP2_UART2_BASE,
-    .uart3_phys = OMAP2_UART3_BASE,
 };
 void __init omap2_set_globals_242x(void)
@@ -71,9 +68,6 @@ static struct omap_globals omap243x_globals = {
     .ctrl = OMAP243X_CTRL_BASE,
     .prm = OMAP2430_PRM_BASE,
     .cm = OMAP2430_CM_BASE,
-    .uart1_phys = OMAP2_UART1_BASE,
-    .uart2_phys = OMAP2_UART2_BASE,
-    .uart3_phys = OMAP2_UART3_BASE,
 };
 void __init omap2_set_globals_243x(void)
@@ -92,10 +86,6 @@ static struct omap_globals omap3_globals = {
     .ctrl = OMAP343X_CTRL_BASE,
     .prm = OMAP3430_PRM_BASE,
     .cm = OMAP3430_CM_BASE,
-    .uart1_phys = OMAP3_UART1_BASE,
-    .uart2_phys = OMAP3_UART2_BASE,
-    .uart3_phys = OMAP3_UART3_BASE,
-    .uart4_phys = OMAP3_UART4_BASE, /* Only on 3630 */
 };
 void __init omap2_set_globals_3xxx(void)
@@ -140,10 +130,6 @@ static struct omap_globals omap4_globals = {
     .prm = OMAP4430_PRM_BASE,
     .cm = OMAP4430_CM_BASE,
     .cm2 = OMAP4430_CM2_BASE,
-    .uart1_phys = OMAP4_UART1_BASE,
-    .uart2_phys = OMAP4_UART2_BASE,
-    .uart3_phys = OMAP4_UART3_BASE,
-    .uart4_phys = OMAP4_UART4_BASE,
 };
 void __init omap2_set_globals_443x(void)


@ -58,6 +58,7 @@ struct omap3_processor_cx {
u32 core_state; u32 core_state;
u32 threshold; u32 threshold;
u32 flags; u32 flags;
const char *desc;
}; };
struct omap3_processor_cx omap3_power_states[OMAP3_MAX_STATES]; struct omap3_processor_cx omap3_power_states[OMAP3_MAX_STATES];
@ -365,6 +366,7 @@ void omap_init_power_states(void)
omap3_power_states[OMAP3_STATE_C1].mpu_state = PWRDM_POWER_ON; omap3_power_states[OMAP3_STATE_C1].mpu_state = PWRDM_POWER_ON;
omap3_power_states[OMAP3_STATE_C1].core_state = PWRDM_POWER_ON; omap3_power_states[OMAP3_STATE_C1].core_state = PWRDM_POWER_ON;
omap3_power_states[OMAP3_STATE_C1].flags = CPUIDLE_FLAG_TIME_VALID; omap3_power_states[OMAP3_STATE_C1].flags = CPUIDLE_FLAG_TIME_VALID;
omap3_power_states[OMAP3_STATE_C1].desc = "MPU ON + CORE ON";
/* C2 . MPU WFI + Core inactive */ /* C2 . MPU WFI + Core inactive */
omap3_power_states[OMAP3_STATE_C2].valid = omap3_power_states[OMAP3_STATE_C2].valid =
@ -380,6 +382,7 @@ void omap_init_power_states(void)
omap3_power_states[OMAP3_STATE_C2].core_state = PWRDM_POWER_ON; omap3_power_states[OMAP3_STATE_C2].core_state = PWRDM_POWER_ON;
omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID | omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID |
CPUIDLE_FLAG_CHECK_BM; CPUIDLE_FLAG_CHECK_BM;
omap3_power_states[OMAP3_STATE_C2].desc = "MPU ON + CORE ON";
/* C3 . MPU CSWR + Core inactive */ /* C3 . MPU CSWR + Core inactive */
omap3_power_states[OMAP3_STATE_C3].valid = omap3_power_states[OMAP3_STATE_C3].valid =
@ -395,6 +398,7 @@ void omap_init_power_states(void)
omap3_power_states[OMAP3_STATE_C3].core_state = PWRDM_POWER_ON; omap3_power_states[OMAP3_STATE_C3].core_state = PWRDM_POWER_ON;
omap3_power_states[OMAP3_STATE_C3].flags = CPUIDLE_FLAG_TIME_VALID | omap3_power_states[OMAP3_STATE_C3].flags = CPUIDLE_FLAG_TIME_VALID |
CPUIDLE_FLAG_CHECK_BM; CPUIDLE_FLAG_CHECK_BM;
omap3_power_states[OMAP3_STATE_C3].desc = "MPU RET + CORE ON";
/* C4 . MPU OFF + Core inactive */ /* C4 . MPU OFF + Core inactive */
omap3_power_states[OMAP3_STATE_C4].valid = omap3_power_states[OMAP3_STATE_C4].valid =
@ -410,6 +414,7 @@ void omap_init_power_states(void)
omap3_power_states[OMAP3_STATE_C4].core_state = PWRDM_POWER_ON; omap3_power_states[OMAP3_STATE_C4].core_state = PWRDM_POWER_ON;
omap3_power_states[OMAP3_STATE_C4].flags = CPUIDLE_FLAG_TIME_VALID | omap3_power_states[OMAP3_STATE_C4].flags = CPUIDLE_FLAG_TIME_VALID |
CPUIDLE_FLAG_CHECK_BM; CPUIDLE_FLAG_CHECK_BM;
omap3_power_states[OMAP3_STATE_C4].desc = "MPU OFF + CORE ON";
/* C5 . MPU CSWR + Core CSWR*/ /* C5 . MPU CSWR + Core CSWR*/
omap3_power_states[OMAP3_STATE_C5].valid = omap3_power_states[OMAP3_STATE_C5].valid =
@ -425,6 +430,7 @@ void omap_init_power_states(void)
omap3_power_states[OMAP3_STATE_C5].core_state = PWRDM_POWER_RET; omap3_power_states[OMAP3_STATE_C5].core_state = PWRDM_POWER_RET;
omap3_power_states[OMAP3_STATE_C5].flags = CPUIDLE_FLAG_TIME_VALID | omap3_power_states[OMAP3_STATE_C5].flags = CPUIDLE_FLAG_TIME_VALID |
CPUIDLE_FLAG_CHECK_BM; CPUIDLE_FLAG_CHECK_BM;
omap3_power_states[OMAP3_STATE_C5].desc = "MPU RET + CORE RET";
/* C6 . MPU OFF + Core CSWR */ /* C6 . MPU OFF + Core CSWR */
omap3_power_states[OMAP3_STATE_C6].valid = omap3_power_states[OMAP3_STATE_C6].valid =
@ -440,6 +446,7 @@ void omap_init_power_states(void)
omap3_power_states[OMAP3_STATE_C6].core_state = PWRDM_POWER_RET; omap3_power_states[OMAP3_STATE_C6].core_state = PWRDM_POWER_RET;
omap3_power_states[OMAP3_STATE_C6].flags = CPUIDLE_FLAG_TIME_VALID | omap3_power_states[OMAP3_STATE_C6].flags = CPUIDLE_FLAG_TIME_VALID |
CPUIDLE_FLAG_CHECK_BM; CPUIDLE_FLAG_CHECK_BM;
omap3_power_states[OMAP3_STATE_C6].desc = "MPU OFF + CORE RET";
/* C7 . MPU OFF + Core OFF */ /* C7 . MPU OFF + Core OFF */
omap3_power_states[OMAP3_STATE_C7].valid = omap3_power_states[OMAP3_STATE_C7].valid =
@ -455,6 +462,7 @@ void omap_init_power_states(void)
omap3_power_states[OMAP3_STATE_C7].core_state = PWRDM_POWER_OFF; omap3_power_states[OMAP3_STATE_C7].core_state = PWRDM_POWER_OFF;
omap3_power_states[OMAP3_STATE_C7].flags = CPUIDLE_FLAG_TIME_VALID | omap3_power_states[OMAP3_STATE_C7].flags = CPUIDLE_FLAG_TIME_VALID |
CPUIDLE_FLAG_CHECK_BM; CPUIDLE_FLAG_CHECK_BM;
omap3_power_states[OMAP3_STATE_C7].desc = "MPU OFF + CORE OFF";
/* /*
 * Erratum i583: implementation for ES rev < ES1.2 on 3630. We cannot * Erratum i583: implementation for ES rev < ES1.2 on 3630. We cannot
@ -464,7 +472,7 @@ void omap_init_power_states(void)
if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) { if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
omap3_power_states[OMAP3_STATE_C7].valid = 0; omap3_power_states[OMAP3_STATE_C7].valid = 0;
cpuidle_params_table[OMAP3_STATE_C7].valid = 0; cpuidle_params_table[OMAP3_STATE_C7].valid = 0;
WARN_ONCE(1, "%s: core off state C7 disabled due to i583\n", pr_warn("%s: core off state C7 disabled due to i583\n",
__func__); __func__);
} }
} }
@ -512,6 +520,7 @@ int __init omap3_idle_init(void)
if (cx->type == OMAP3_STATE_C1) if (cx->type == OMAP3_STATE_C1)
dev->safe_state = state; dev->safe_state = state;
sprintf(state->name, "C%d", count+1); sprintf(state->name, "C%d", count+1);
strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
count++; count++;
} }

View file

@ -17,8 +17,12 @@
* wfi used in low power code. Directly opcode is used instead * wfi used in low power code. Directly opcode is used instead
 * of instruction to avoid multi-omap build break * of instruction to avoid multi-omap build break
*/ */
#ifdef CONFIG_THUMB2_KERNEL
#define do_wfi() __asm__ __volatile__ ("wfi" : : : "memory")
#else
#define do_wfi() \ #define do_wfi() \
__asm__ __volatile__ (".word 0xe320f003" : : : "memory") __asm__ __volatile__ (".word 0xe320f003" : : : "memory")
#endif
#ifdef CONFIG_CACHE_L2X0 #ifdef CONFIG_CACHE_L2X0
extern void __iomem *l2cache_base; extern void __iomem *l2cache_base;

View file

@ -138,10 +138,12 @@ static void omap2_mbox_disable_irq(struct omap_mbox *mbox,
omap_mbox_type_t irq) omap_mbox_type_t irq)
{ {
struct omap_mbox2_priv *p = mbox->priv; struct omap_mbox2_priv *p = mbox->priv;
u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit; u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
l = mbox_read_reg(p->irqdisable);
l &= ~bit; if (!cpu_is_omap44xx())
mbox_write_reg(l, p->irqdisable); bit = mbox_read_reg(p->irqdisable) & ~bit;
mbox_write_reg(bit, p->irqdisable);
} }
static void omap2_mbox_ack_irq(struct omap_mbox *mbox, static void omap2_mbox_ack_irq(struct omap_mbox *mbox,

View file

@ -45,5 +45,5 @@ hold: ldr r12,=0x103
* should now contain the SVC stack for this core * should now contain the SVC stack for this core
*/ */
b secondary_startup b secondary_startup
END(omap_secondary_startup) ENDPROC(omap_secondary_startup)

View file

@ -29,7 +29,7 @@ ENTRY(omap_smc1)
dsb dsb
smc #0 smc #0
ldmfd sp!, {r2-r12, pc} ldmfd sp!, {r2-r12, pc}
END(omap_smc1) ENDPROC(omap_smc1)
ENTRY(omap_modify_auxcoreboot0) ENTRY(omap_modify_auxcoreboot0)
stmfd sp!, {r1-r12, lr} stmfd sp!, {r1-r12, lr}
@ -37,7 +37,7 @@ ENTRY(omap_modify_auxcoreboot0)
dsb dsb
smc #0 smc #0
ldmfd sp!, {r1-r12, pc} ldmfd sp!, {r1-r12, pc}
END(omap_modify_auxcoreboot0) ENDPROC(omap_modify_auxcoreboot0)
ENTRY(omap_auxcoreboot_addr) ENTRY(omap_auxcoreboot_addr)
stmfd sp!, {r2-r12, lr} stmfd sp!, {r2-r12, lr}
@ -45,7 +45,7 @@ ENTRY(omap_auxcoreboot_addr)
dsb dsb
smc #0 smc #0
ldmfd sp!, {r2-r12, pc} ldmfd sp!, {r2-r12, pc}
END(omap_auxcoreboot_addr) ENDPROC(omap_auxcoreboot_addr)
ENTRY(omap_read_auxcoreboot0) ENTRY(omap_read_auxcoreboot0)
stmfd sp!, {r2-r12, lr} stmfd sp!, {r2-r12, lr}
@ -54,4 +54,4 @@ ENTRY(omap_read_auxcoreboot0)
smc #0 smc #0
mov r0, r0, lsr #9 mov r0, r0, lsr #9
ldmfd sp!, {r2-r12, pc} ldmfd sp!, {r2-r12, pc}
END(omap_read_auxcoreboot0) ENDPROC(omap_read_auxcoreboot0)

View file

@ -59,8 +59,15 @@
static bool is_offset_valid; static bool is_offset_valid;
static u8 smps_offset; static u8 smps_offset;
/*
 * Flag to ensure that the Smartreflex bit in the TWL,
 * if cleared in the board file, is not overwritten.
*/
static bool __initdata twl_sr_enable_autoinit;
#define TWL4030_DCDC_GLOBAL_CFG 0x06
#define REG_SMPS_OFFSET 0xE0 #define REG_SMPS_OFFSET 0xE0
#define SMARTREFLEX_ENABLE BIT(3)
static unsigned long twl4030_vsel_to_uv(const u8 vsel) static unsigned long twl4030_vsel_to_uv(const u8 vsel)
{ {
@ -269,6 +276,18 @@ int __init omap3_twl_init(void)
omap3_core_volt_info.vp_vddmax = OMAP3630_VP2_VLIMITTO_VDDMAX; omap3_core_volt_info.vp_vddmax = OMAP3630_VP2_VLIMITTO_VDDMAX;
} }
/*
* The smartreflex bit on twl4030 specifies if the setting of voltage
* is done over the I2C_SR path. Since this setting is independent of
* the actual usage of smartreflex AVS module, we enable TWL SR bit
* by default irrespective of whether smartreflex AVS module is enabled
* on the OMAP side or not. This is because without this bit enabled,
 * voltage scaling through the VP forceupdate/bypass mechanism will
 * not function on the TWL over I2C_SR.
*/
if (!twl_sr_enable_autoinit)
omap3_twl_set_sr_bit(true);
voltdm = omap_voltage_domain_lookup("mpu"); voltdm = omap_voltage_domain_lookup("mpu");
omap_voltage_register_pmic(voltdm, &omap3_mpu_volt_info); omap_voltage_register_pmic(voltdm, &omap3_mpu_volt_info);
@ -277,3 +296,44 @@ int __init omap3_twl_init(void)
return 0; return 0;
} }
/**
* omap3_twl_set_sr_bit() - Set/Clear SR bit on TWL
* @enable: enable SR mode in twl or not
*
 * If 'enable' is true, the Smartreflex bit on the TWL4030 is set to make sure
 * voltage scaling through OMAP SR works. Otherwise the bit is cleared: some
 * platforms use OMAP3 with a T2 but set the voltages through the Synchronized
 * Scaling Hardware Strategy (ENABLE_VMODE=1) or the Direct Strategy Software
 * Scaling Mode (ENABLE_VMODE=0), and in those scenarios this bit must be
 * cleared (enable = false).
*
 * Returns 0 on success; an error is returned if the I2C read/write fails.
*/
int __init omap3_twl_set_sr_bit(bool enable)
{
u8 temp;
int ret;
if (twl_sr_enable_autoinit)
pr_warning("%s: unexpected multiple calls\n", __func__);
ret = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &temp,
TWL4030_DCDC_GLOBAL_CFG);
if (ret)
goto err;
if (enable)
temp |= SMARTREFLEX_ENABLE;
else
temp &= ~SMARTREFLEX_ENABLE;
ret = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, temp,
TWL4030_DCDC_GLOBAL_CFG);
if (!ret) {
twl_sr_enable_autoinit = true;
return 0;
}
err:
	pr_err("%s: Error accessing TWL4030 (%d)\n", __func__, ret);
return ret;
}
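
For illustration only, a board that sets its voltages through the TWL's own VMODE-based scaling could clear the bit before omap3_twl_init() runs, so the default enable above is skipped; a minimal sketch under that assumption (the board function name is hypothetical):

	/* Hypothetical board PMIC setup: this board scales voltages via
	 * ENABLE_VMODE, so the TWL Smartreflex bit must stay cleared. */
	static void __init myboard_pmic_init(void)
	{
		/* Clearing the bit sets twl_sr_enable_autoinit, so
		 * omap3_twl_init() will not re-enable it afterwards. */
		if (omap3_twl_set_sr_bit(false))
			pr_err("myboard: failed to clear TWL SR bit\n");

		omap3_twl_init();
	}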

View file

@ -21,6 +21,7 @@
#include <plat/cpu.h> #include <plat/cpu.h>
#include "omap_opp_data.h" #include "omap_opp_data.h"
#include "pm.h"
static struct omap_opp_def __initdata omap34xx_opp_def_list[] = { static struct omap_opp_def __initdata omap34xx_opp_def_list[] = {
/* MPU OPP1 */ /* MPU OPP1 */
@ -88,7 +89,7 @@ static struct omap_opp_def __initdata omap36xx_opp_def_list[] = {
/** /**
* omap3_opp_init() - initialize omap3 opp table * omap3_opp_init() - initialize omap3 opp table
*/ */
static int __init omap3_opp_init(void) int __init omap3_opp_init(void)
{ {
int r = -ENODEV; int r = -ENODEV;

View file

@ -22,6 +22,7 @@
#include <plat/cpu.h> #include <plat/cpu.h>
#include "omap_opp_data.h" #include "omap_opp_data.h"
#include "pm.h"
static struct omap_opp_def __initdata omap44xx_opp_def_list[] = { static struct omap_opp_def __initdata omap44xx_opp_def_list[] = {
/* MPU OPP1 - OPP50 */ /* MPU OPP1 - OPP50 */
@ -42,7 +43,7 @@ static struct omap_opp_def __initdata omap44xx_opp_def_list[] = {
/** /**
* omap4_opp_init() - initialize omap4 opp table * omap4_opp_init() - initialize omap4 opp table
*/ */
static int __init omap4_opp_init(void) int __init omap4_opp_init(void)
{ {
int r = -ENODEV; int r = -ENODEV;

View file

@ -83,7 +83,9 @@ static int _init_omap_device(char *name, struct device **new_dev)
static void omap2_init_processor_devices(void) static void omap2_init_processor_devices(void)
{ {
_init_omap_device("mpu", &mpu_dev); _init_omap_device("mpu", &mpu_dev);
_init_omap_device("iva", &iva_dev); if (omap3_has_iva())
_init_omap_device("iva", &iva_dev);
if (cpu_is_omap44xx()) { if (cpu_is_omap44xx()) {
_init_omap_device("l3_main_1", &l3_dev); _init_omap_device("l3_main_1", &l3_dev);
_init_omap_device("dsp", &dsp_dev); _init_omap_device("dsp", &dsp_dev);

View file

@ -127,6 +127,7 @@ static inline void omap_enable_smartreflex_on_init(void) {}
#ifdef CONFIG_TWL4030_CORE #ifdef CONFIG_TWL4030_CORE
extern int omap3_twl_init(void); extern int omap3_twl_init(void);
extern int omap4_twl_init(void); extern int omap4_twl_init(void);
extern int omap3_twl_set_sr_bit(bool enable);
#else #else
static inline int omap3_twl_init(void) static inline int omap3_twl_init(void)
{ {

View file

@ -363,9 +363,6 @@ static const struct platform_suspend_ops __initdata omap_pm_ops;
/* XXX This function should be shareable between OMAP2xxx and OMAP3 */ /* XXX This function should be shareable between OMAP2xxx and OMAP3 */
static int __init clkdms_setup(struct clockdomain *clkdm, void *unused) static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
{ {
clkdm_clear_all_wkdeps(clkdm);
clkdm_clear_all_sleepdeps(clkdm);
if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO) if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
omap2_clkdm_allow_idle(clkdm); omap2_clkdm_allow_idle(clkdm);
else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP && else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
@ -411,10 +408,7 @@ static void __init prcm_setup_regs(void)
pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF); pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);
omap2_clkdm_sleep(gfx_clkdm); omap2_clkdm_sleep(gfx_clkdm);
/* /* Enable hardware-supervised idle for all clkdms */
* Clear clockdomain wakeup dependencies and enable
* hardware-supervised idle for all clkdms
*/
clkdm_for_each(clkdms_setup, NULL); clkdm_for_each(clkdms_setup, NULL);
clkdm_add_wkdep(mpu_clkdm, wkup_clkdm); clkdm_add_wkdep(mpu_clkdm, wkup_clkdm);

View file

@ -311,11 +311,6 @@ static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
static void restore_control_register(u32 val)
{
__asm__ __volatile__ ("mcr p15, 0, %0, c1, c0, 0" : : "r" (val));
}
/* Function to restore the table entry that was modified for enabling MMU */ /* Function to restore the table entry that was modified for enabling MMU */
static void restore_table_entry(void) static void restore_table_entry(void)
{ {
@ -337,7 +332,7 @@ static void restore_table_entry(void)
control_reg_value = __raw_readl(scratchpad_address control_reg_value = __raw_readl(scratchpad_address
+ OMAP343X_CONTROL_REG_VALUE_OFFSET); + OMAP343X_CONTROL_REG_VALUE_OFFSET);
/* This will enable caches and prediction */ /* This will enable caches and prediction */
restore_control_register(control_reg_value); set_cr(control_reg_value);
} }
void omap_sram_idle(void) void omap_sram_idle(void)
@ -695,21 +690,6 @@ static void __init prcm_setup_regs(void)
u32 omap3630_grpsel_uart4_mask = cpu_is_omap3630() ? u32 omap3630_grpsel_uart4_mask = cpu_is_omap3630() ?
OMAP3630_GRPSEL_UART4_MASK : 0; OMAP3630_GRPSEL_UART4_MASK : 0;
/* XXX Reset all wkdeps. This should be done when initializing
* powerdomains */
omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, PM_WKDEP);
omap2_prm_write_mod_reg(0, MPU_MOD, PM_WKDEP);
omap2_prm_write_mod_reg(0, OMAP3430_DSS_MOD, PM_WKDEP);
omap2_prm_write_mod_reg(0, OMAP3430_NEON_MOD, PM_WKDEP);
omap2_prm_write_mod_reg(0, OMAP3430_CAM_MOD, PM_WKDEP);
omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD, PM_WKDEP);
if (omap_rev() > OMAP3430_REV_ES1_0) {
omap2_prm_write_mod_reg(0, OMAP3430ES2_SGX_MOD, PM_WKDEP);
omap2_prm_write_mod_reg(0, OMAP3430ES2_USBHOST_MOD, PM_WKDEP);
} else
omap2_prm_write_mod_reg(0, GFX_MOD, PM_WKDEP);
/* /*
* Enable interface clock autoidle for all modules. * Enable interface clock autoidle for all modules.
* Note that in the long run this should be done by clockfw * Note that in the long run this should be done by clockfw
@ -928,8 +908,7 @@ void omap3_pm_off_mode_enable(int enable)
pwrst->pwrdm == core_pwrdm && pwrst->pwrdm == core_pwrdm &&
state == PWRDM_POWER_OFF) { state == PWRDM_POWER_OFF) {
pwrst->next_state = PWRDM_POWER_RET; pwrst->next_state = PWRDM_POWER_RET;
WARN_ONCE(1, pr_warn("%s: Core OFF disabled due to errata i583\n",
"%s: Core OFF disabled due to errata i583\n",
__func__); __func__);
} else { } else {
pwrst->next_state = state; pwrst->next_state = state;

View file

@ -64,6 +64,11 @@
#define SDRC_DLLA_STATUS_V OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS) #define SDRC_DLLA_STATUS_V OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
#define SDRC_DLLA_CTRL_V OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL) #define SDRC_DLLA_CTRL_V OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL)
/*
 * This file needs to be built unconditionally as ARM to interoperate correctly
* with non-Thumb-2-capable firmware.
*/
.arm
/* /*
* API functions * API functions
@ -82,6 +87,8 @@ ENTRY(get_restore_pointer)
stmfd sp!, {lr} @ save registers on stack stmfd sp!, {lr} @ save registers on stack
adr r0, restore adr r0, restore
ldmfd sp!, {pc} @ restore regs and return ldmfd sp!, {pc} @ restore regs and return
ENDPROC(get_restore_pointer)
.align
ENTRY(get_restore_pointer_sz) ENTRY(get_restore_pointer_sz)
.word . - get_restore_pointer .word . - get_restore_pointer
@ -91,6 +98,8 @@ ENTRY(get_omap3630_restore_pointer)
stmfd sp!, {lr} @ save registers on stack stmfd sp!, {lr} @ save registers on stack
adr r0, restore_3630 adr r0, restore_3630
ldmfd sp!, {pc} @ restore regs and return ldmfd sp!, {pc} @ restore regs and return
ENDPROC(get_omap3630_restore_pointer)
.align
ENTRY(get_omap3630_restore_pointer_sz) ENTRY(get_omap3630_restore_pointer_sz)
.word . - get_omap3630_restore_pointer .word . - get_omap3630_restore_pointer
@ -100,6 +109,8 @@ ENTRY(get_es3_restore_pointer)
stmfd sp!, {lr} @ save registers on stack stmfd sp!, {lr} @ save registers on stack
adr r0, restore_es3 adr r0, restore_es3
ldmfd sp!, {pc} @ restore regs and return ldmfd sp!, {pc} @ restore regs and return
ENDPROC(get_es3_restore_pointer)
.align
ENTRY(get_es3_restore_pointer_sz) ENTRY(get_es3_restore_pointer_sz)
.word . - get_es3_restore_pointer .word . - get_es3_restore_pointer
@ -113,8 +124,10 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
stmfd sp!, {lr} @ save registers on stack stmfd sp!, {lr} @ save registers on stack
/* Setup so that we will disable and enable l2 */ /* Setup so that we will disable and enable l2 */
mov r1, #0x1 mov r1, #0x1
str r1, l2dis_3630 adrl r2, l2dis_3630 @ may be too distant for plain adr
str r1, [r2]
ldmfd sp!, {pc} @ restore regs and return ldmfd sp!, {pc} @ restore regs and return
ENDPROC(enable_omap3630_toggle_l2_on_restore)
.text .text
/* Function to call rom code to save secure ram context */ /* Function to call rom code to save secure ram context */
@ -131,20 +144,22 @@ ENTRY(save_secure_ram_context)
mov r1, #0 @ set task id for ROM code in r1 mov r1, #0 @ set task id for ROM code in r1
mov r2, #4 @ set some flags in r2, r6 mov r2, #4 @ set some flags in r2, r6
mov r6, #0xff mov r6, #0xff
mcr p15, 0, r0, c7, c10, 4 @ data write barrier dsb @ data write barrier
mcr p15, 0, r0, c7, c10, 5 @ data memory barrier dmb @ data memory barrier
.word 0xE1600071 @ call SMI monitor (smi #1) smc #1 @ call SMI monitor (smi #1)
nop nop
nop nop
nop nop
nop nop
ldmfd sp!, {r1-r12, pc} ldmfd sp!, {r1-r12, pc}
.align
sram_phy_addr_mask: sram_phy_addr_mask:
.word SRAM_BASE_P .word SRAM_BASE_P
high_mask: high_mask:
.word 0xffff .word 0xffff
api_params: api_params:
.word 0x4, 0x0, 0x0, 0x1, 0x1 .word 0x4, 0x0, 0x0, 0x1, 0x1
ENDPROC(save_secure_ram_context)
ENTRY(save_secure_ram_context_sz) ENTRY(save_secure_ram_context_sz)
.word . - save_secure_ram_context .word . - save_secure_ram_context
@ -173,12 +188,12 @@ ENTRY(omap34xx_cpu_suspend)
stmfd sp!, {r0-r12, lr} @ save registers on stack stmfd sp!, {r0-r12, lr} @ save registers on stack
/* /*
* r0 contains restore pointer in sdram * r0 contains CPU context save/restore pointer in sdram
* r1 contains information about saving context: * r1 contains information about saving context:
* 0 - No context lost * 0 - No context lost
* 1 - Only L1 and logic lost * 1 - Only L1 and logic lost
 * 2 - Only L2 lost * 2 - Only L2 lost (even though L1 is retained, we clean it along with L2)
* 3 - Both L1 and L2 lost * 3 - Both L1 and L2 lost and logic lost
*/ */
	/* Directly jump to WFI if the context save is not required */
@ -199,89 +214,74 @@ save_context_wfi:
beq clean_caches beq clean_caches
l1_logic_lost: l1_logic_lost:
/* Store sp and spsr to SDRAM */ mov r4, sp @ Store sp
mov r4, sp mrs r5, spsr @ Store spsr
mrs r5, spsr mov r6, lr @ Store lr
mov r6, lr
stmia r8!, {r4-r6} stmia r8!, {r4-r6}
/* Save all ARM registers */
/* Coprocessor access control register */
mrc p15, 0, r6, c1, c0, 2
stmia r8!, {r6}
/* TTBR0, TTBR1 and Translation table base control */
mrc p15, 0, r4, c2, c0, 0
mrc p15, 0, r5, c2, c0, 1
mrc p15, 0, r6, c2, c0, 2
stmia r8!, {r4-r6}
/*
* Domain access control register, data fault status register,
* and instruction fault status register
*/
mrc p15, 0, r4, c3, c0, 0
mrc p15, 0, r5, c5, c0, 0
mrc p15, 0, r6, c5, c0, 1
stmia r8!, {r4-r6}
/*
* Data aux fault status register, instruction aux fault status,
* data fault address register and instruction fault address register
*/
mrc p15, 0, r4, c5, c1, 0
mrc p15, 0, r5, c5, c1, 1
mrc p15, 0, r6, c6, c0, 0
mrc p15, 0, r7, c6, c0, 2
stmia r8!, {r4-r7}
/*
* user r/w thread and process ID, user r/o thread and process ID,
* priv only thread and process ID, cache size selection
*/
mrc p15, 0, r4, c13, c0, 2
mrc p15, 0, r5, c13, c0, 3
mrc p15, 0, r6, c13, c0, 4
mrc p15, 2, r7, c0, c0, 0
stmia r8!, {r4-r7}
/* Data TLB lockdown, instruction TLB lockdown registers */
mrc p15, 0, r5, c10, c0, 0
mrc p15, 0, r6, c10, c0, 1
stmia r8!, {r5-r6}
/* Secure or non secure vector base address, FCSE PID, Context PID*/
mrc p15, 0, r4, c12, c0, 0
mrc p15, 0, r5, c13, c0, 0
mrc p15, 0, r6, c13, c0, 1
stmia r8!, {r4-r6}
/* Primary remap, normal remap registers */
mrc p15, 0, r4, c10, c2, 0
mrc p15, 0, r5, c10, c2, 1
stmia r8!,{r4-r5}
/* Store current cpsr*/ mrc p15, 0, r4, c1, c0, 2 @ Coprocessor access control register
mrs r2, cpsr mrc p15, 0, r5, c2, c0, 0 @ TTBR0
stmia r8!, {r2} mrc p15, 0, r6, c2, c0, 1 @ TTBR1
mrc p15, 0, r7, c2, c0, 2 @ TTBCR
stmia r8!, {r4-r7}
mrc p15, 0, r4, c1, c0, 0 mrc p15, 0, r4, c3, c0, 0 @ Domain access Control Register
/* save control register */ mrc p15, 0, r5, c10, c2, 0 @ PRRR
mrc p15, 0, r6, c10, c2, 1 @ NMRR
stmia r8!,{r4-r6}
mrc p15, 0, r4, c13, c0, 1 @ Context ID
mrc p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
mrs r7, cpsr @ Store current cpsr
stmia r8!, {r4-r7}
mrc p15, 0, r4, c1, c0, 0 @ save control register
stmia r8!, {r4} stmia r8!, {r4}
clean_caches: clean_caches:
/*
* Clean Data or unified cache to POU
* How to invalidate only L1 cache???? - #FIX_ME#
* mcr p15, 0, r11, c7, c11, 1
*/
cmp r1, #0x1 @ Check whether L2 inval is required
beq omap3_do_wfi
clean_l2:
/* /*
* jump out to kernel flush routine * jump out to kernel flush routine
* - reuse that code is better * - reuse that code is better
* - it executes in a cached space so is faster than refetch per-block * - it executes in a cached space so is faster than refetch per-block
* - should be faster and will change with kernel * - should be faster and will change with kernel
* - 'might' have to copy address, load and jump to it * - 'might' have to copy address, load and jump to it
* Flush all data from the L1 data cache before disabling
* SCTLR.C bit.
*/ */
ldr r1, kernel_flush ldr r1, kernel_flush
mov lr, pc mov lr, pc
bx r1 bx r1
/*
* Clear the SCTLR.C bit to prevent further data cache
* allocation. Clearing SCTLR.C would make all the data accesses
* strongly ordered and would not hit the cache.
*/
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #(1 << 2) @ Disable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
/*
	 * Invalidate the L1 data cache. Even though only an invalidate is
	 * necessary, the exported flush API is used here. Doing a clean
	 * on an already clean cache is almost a NOP.
*/
ldr r1, kernel_flush
blx r1
/*
	 * The kernel doesn't interwork: v7_flush_dcache_all in particular will
* always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
* This sequence switches back to ARM. Note that .align may insert a
* nop: bx pc needs to be word-aligned in order to work.
*/
THUMB( .thumb )
THUMB( .align )
THUMB( bx pc )
THUMB( nop )
.arm
omap3_do_wfi: omap3_do_wfi:
ldr r4, sdrc_power @ read the SDRC_POWER register ldr r4, sdrc_power @ read the SDRC_POWER register
ldr r5, [r4] @ read the contents of SDRC_POWER ldr r5, [r4] @ read the contents of SDRC_POWER
@ -289,9 +289,8 @@ omap3_do_wfi:
str r5, [r4] @ write back to SDRC_POWER register str r5, [r4] @ write back to SDRC_POWER register
/* Data memory barrier and Data sync barrier */ /* Data memory barrier and Data sync barrier */
mov r1, #0 dsb
mcr p15, 0, r1, c7, c10, 4 dmb
mcr p15, 0, r1, c7, c10, 5
/* /*
* =================================== * ===================================
@ -317,6 +316,12 @@ omap3_do_wfi:
nop nop
bl wait_sdrc_ok bl wait_sdrc_ok
mrc p15, 0, r0, c1, c0, 0
tst r0, #(1 << 2) @ Check C bit enabled?
orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared
mcreq p15, 0, r0, c1, c0, 0
isb
/* /*
* =================================== * ===================================
* == Exit point from non-OFF modes == * == Exit point from non-OFF modes ==
@ -406,9 +411,9 @@ skipl2dis:
mov r2, #4 @ set some flags in r2, r6 mov r2, #4 @ set some flags in r2, r6
mov r6, #0xff mov r6, #0xff
adr r3, l2_inv_api_params @ r3 points to dummy parameters adr r3, l2_inv_api_params @ r3 points to dummy parameters
mcr p15, 0, r0, c7, c10, 4 @ data write barrier dsb @ data write barrier
mcr p15, 0, r0, c7, c10, 5 @ data memory barrier dmb @ data memory barrier
.word 0xE1600071 @ call SMI monitor (smi #1) smc #1 @ call SMI monitor (smi #1)
/* Write to Aux control register to set some bits */ /* Write to Aux control register to set some bits */
mov r0, #42 @ set service ID for PPA mov r0, #42 @ set service ID for PPA
mov r12, r0 @ copy secure Service ID in r12 mov r12, r0 @ copy secure Service ID in r12
@ -417,9 +422,9 @@ skipl2dis:
mov r6, #0xff mov r6, #0xff
ldr r4, scratchpad_base ldr r4, scratchpad_base
ldr r3, [r4, #0xBC] @ r3 points to parameters ldr r3, [r4, #0xBC] @ r3 points to parameters
mcr p15, 0, r0, c7, c10, 4 @ data write barrier dsb @ data write barrier
mcr p15, 0, r0, c7, c10, 5 @ data memory barrier dmb @ data memory barrier
.word 0xE1600071 @ call SMI monitor (smi #1) smc #1 @ call SMI monitor (smi #1)
#ifdef CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE #ifdef CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE
/* Restore L2 aux control register */ /* Restore L2 aux control register */
@ -432,29 +437,30 @@ skipl2dis:
ldr r4, scratchpad_base ldr r4, scratchpad_base
ldr r3, [r4, #0xBC] ldr r3, [r4, #0xBC]
adds r3, r3, #8 @ r3 points to parameters adds r3, r3, #8 @ r3 points to parameters
mcr p15, 0, r0, c7, c10, 4 @ data write barrier dsb @ data write barrier
mcr p15, 0, r0, c7, c10, 5 @ data memory barrier dmb @ data memory barrier
.word 0xE1600071 @ call SMI monitor (smi #1) smc #1 @ call SMI monitor (smi #1)
#endif #endif
b logic_l1_restore b logic_l1_restore
.align
l2_inv_api_params: l2_inv_api_params:
.word 0x1, 0x00 .word 0x1, 0x00
l2_inv_gp: l2_inv_gp:
/* Execute smi to invalidate L2 cache */ /* Execute smi to invalidate L2 cache */
mov r12, #0x1 @ set up to invalidate L2 mov r12, #0x1 @ set up to invalidate L2
.word 0xE1600070 @ Call SMI monitor (smieq) smc #0 @ Call SMI monitor (smieq)
/* Write to Aux control register to set some bits */ /* Write to Aux control register to set some bits */
ldr r4, scratchpad_base ldr r4, scratchpad_base
ldr r3, [r4,#0xBC] ldr r3, [r4,#0xBC]
ldr r0, [r3,#4] ldr r0, [r3,#4]
mov r12, #0x3 mov r12, #0x3
.word 0xE1600070 @ Call SMI monitor (smieq) smc #0 @ Call SMI monitor (smieq)
ldr r4, scratchpad_base ldr r4, scratchpad_base
ldr r3, [r4,#0xBC] ldr r3, [r4,#0xBC]
ldr r0, [r3,#12] ldr r0, [r3,#12]
mov r12, #0x2 mov r12, #0x2
.word 0xE1600070 @ Call SMI monitor (smieq) smc #0 @ Call SMI monitor (smieq)
logic_l1_restore: logic_l1_restore:
ldr r1, l2dis_3630 ldr r1, l2dis_3630
cmp r1, #0x1 @ Test if L2 re-enable needed on 3630 cmp r1, #0x1 @ Test if L2 re-enable needed on 3630
@ -473,68 +479,29 @@ skipl2reen:
ldr r4, scratchpad_base ldr r4, scratchpad_base
ldr r3, [r4,#0xBC] ldr r3, [r4,#0xBC]
adds r3, r3, #16 adds r3, r3, #16
ldmia r3!, {r4-r6} ldmia r3!, {r4-r6}
mov sp, r4 mov sp, r4 @ Restore sp
msr spsr_cxsf, r5 msr spsr_cxsf, r5 @ Restore spsr
mov lr, r6 mov lr, r6 @ Restore lr
ldmia r3!, {r4-r9} ldmia r3!, {r4-r7}
/* Coprocessor access Control Register */ mcr p15, 0, r4, c1, c0, 2 @ Coprocessor access Control Register
mcr p15, 0, r4, c1, c0, 2 mcr p15, 0, r5, c2, c0, 0 @ TTBR0
mcr p15, 0, r6, c2, c0, 1 @ TTBR1
mcr p15, 0, r7, c2, c0, 2 @ TTBCR
ldmia r3!,{r4-r6}
mcr p15, 0, r4, c3, c0, 0 @ Domain access Control Register
mcr p15, 0, r5, c10, c2, 0 @ PRRR
mcr p15, 0, r6, c10, c2, 1 @ NMRR
/* TTBR0 */
MCR p15, 0, r5, c2, c0, 0
/* TTBR1 */
MCR p15, 0, r6, c2, c0, 1
/* Translation table base control register */
MCR p15, 0, r7, c2, c0, 2
/* Domain access Control Register */
MCR p15, 0, r8, c3, c0, 0
/* Data fault status Register */
MCR p15, 0, r9, c5, c0, 0
ldmia r3!,{r4-r8}
/* Instruction fault status Register */
MCR p15, 0, r4, c5, c0, 1
/* Data Auxiliary Fault Status Register */
MCR p15, 0, r5, c5, c1, 0
/* Instruction Auxiliary Fault Status Register*/
MCR p15, 0, r6, c5, c1, 1
/* Data Fault Address Register */
MCR p15, 0, r7, c6, c0, 0
/* Instruction Fault Address Register*/
MCR p15, 0, r8, c6, c0, 2
ldmia r3!,{r4-r7} ldmia r3!,{r4-r7}
mcr p15, 0, r4, c13, c0, 1 @ Context ID
/* User r/w thread and process ID */ mcr p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
MCR p15, 0, r4, c13, c0, 2 mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
/* User ro thread and process ID */ msr cpsr, r7 @ store cpsr
MCR p15, 0, r5, c13, c0, 3
/* Privileged only thread and process ID */
MCR p15, 0, r6, c13, c0, 4
/* Cache size selection */
MCR p15, 2, r7, c0, c0, 0
ldmia r3!,{r4-r8}
/* Data TLB lockdown registers */
MCR p15, 0, r4, c10, c0, 0
/* Instruction TLB lockdown registers */
MCR p15, 0, r5, c10, c0, 1
/* Secure or Nonsecure Vector Base Address */
MCR p15, 0, r6, c12, c0, 0
/* FCSE PID */
MCR p15, 0, r7, c13, c0, 0
/* Context PID */
MCR p15, 0, r8, c13, c0, 1
ldmia r3!,{r4-r5}
/* Primary memory remap register */
MCR p15, 0, r4, c10, c2, 0
/* Normal memory remap register */
MCR p15, 0, r5, c10, c2, 1
/* Restore cpsr */
ldmia r3!,{r4} @ load CPSR from SDRAM
msr cpsr, r4 @ store cpsr
/* Enabling MMU here */ /* Enabling MMU here */
mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl
@ -592,12 +559,17 @@ usettbr0:
ldr r2, cache_pred_disable_mask ldr r2, cache_pred_disable_mask
and r4, r2 and r4, r2
mcr p15, 0, r4, c1, c0, 0 mcr p15, 0, r4, c1, c0, 0
dsb
isb
ldr r0, =restoremmu_on
bx r0
/* /*
* ============================== * ==============================
* == Exit point from OFF mode == * == Exit point from OFF mode ==
* ============================== * ==============================
*/ */
restoremmu_on:
ldmfd sp!, {r0-r12, pc} @ restore regs and return ldmfd sp!, {r0-r12, pc} @ restore regs and return
@ -607,6 +579,7 @@ usettbr0:
/* This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 */ /* This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 */
.text .text
.align 3
ENTRY(es3_sdrc_fix) ENTRY(es3_sdrc_fix)
ldr r4, sdrc_syscfg @ get config addr ldr r4, sdrc_syscfg @ get config addr
ldr r5, [r4] @ get value ldr r5, [r4] @ get value
@ -634,6 +607,7 @@ ENTRY(es3_sdrc_fix)
str r5, [r4] @ kick off refreshes str r5, [r4] @ kick off refreshes
bx lr bx lr
.align
sdrc_syscfg: sdrc_syscfg:
.word SDRC_SYSCONFIG_P .word SDRC_SYSCONFIG_P
sdrc_mr_0: sdrc_mr_0:
@ -648,6 +622,7 @@ sdrc_emr2_1:
.word SDRC_EMR2_1_P .word SDRC_EMR2_1_P
sdrc_manual_1: sdrc_manual_1:
.word SDRC_MANUAL_1_P .word SDRC_MANUAL_1_P
ENDPROC(es3_sdrc_fix)
ENTRY(es3_sdrc_fix_sz) ENTRY(es3_sdrc_fix_sz)
.word . - es3_sdrc_fix .word . - es3_sdrc_fix
@ -682,6 +657,12 @@ wait_sdrc_ready:
bic r5, r5, #0x40 bic r5, r5, #0x40
str r5, [r4] str r5, [r4]
/*
 * PC-relative stores lead to undefined behaviour in Thumb-2: use r7 as a
 * base instead.
 * Be careful not to clobber r7 when maintaining this code.
*/
is_dll_in_lock_mode: is_dll_in_lock_mode:
/* Is dll in lock mode? */ /* Is dll in lock mode? */
ldr r4, sdrc_dlla_ctrl ldr r4, sdrc_dlla_ctrl
@ -689,10 +670,11 @@ is_dll_in_lock_mode:
tst r5, #0x4 tst r5, #0x4
bxne lr @ Return if locked bxne lr @ Return if locked
/* wait till dll locks */ /* wait till dll locks */
adr r7, kick_counter
wait_dll_lock_timed: wait_dll_lock_timed:
ldr r4, wait_dll_lock_counter ldr r4, wait_dll_lock_counter
add r4, r4, #1 add r4, r4, #1
str r4, wait_dll_lock_counter str r4, [r7, #wait_dll_lock_counter - kick_counter]
ldr r4, sdrc_dlla_status ldr r4, sdrc_dlla_status
/* Wait 20uS for lock */ /* Wait 20uS for lock */
mov r6, #8 mov r6, #8
@ -718,9 +700,10 @@ kick_dll:
dsb dsb
ldr r4, kick_counter ldr r4, kick_counter
add r4, r4, #1 add r4, r4, #1
str r4, kick_counter str r4, [r7] @ kick_counter
b wait_dll_lock_timed b wait_dll_lock_timed
.align
cm_idlest1_core: cm_idlest1_core:
.word CM_IDLEST1_CORE_V .word CM_IDLEST1_CORE_V
cm_idlest_ckgen: cm_idlest_ckgen:
@ -763,6 +746,7 @@ kick_counter:
.word 0 .word 0
wait_dll_lock_counter: wait_dll_lock_counter:
.word 0 .word 0
ENDPROC(omap34xx_cpu_suspend)
ENTRY(omap34xx_cpu_suspend_sz) ENTRY(omap34xx_cpu_suspend_sz)
.word . - omap34xx_cpu_suspend .word . - omap34xx_cpu_suspend

View file

@ -54,6 +54,7 @@ struct omap_sr {
struct list_head node; struct list_head node;
struct omap_sr_nvalue_table *nvalue_table; struct omap_sr_nvalue_table *nvalue_table;
struct voltagedomain *voltdm; struct voltagedomain *voltdm;
struct dentry *dbg_dir;
}; };
/* sr_list contains all the instances of smartreflex module */ /* sr_list contains all the instances of smartreflex module */
@ -260,9 +261,11 @@ static int sr_late_init(struct omap_sr *sr_info)
if (sr_class->class_type == SR_CLASS2 && if (sr_class->class_type == SR_CLASS2 &&
sr_class->notify_flags && sr_info->irq) { sr_class->notify_flags && sr_info->irq) {
name = kzalloc(SMARTREFLEX_NAME_LEN + 1, GFP_KERNEL); name = kasprintf(GFP_KERNEL, "sr_%s", sr_info->voltdm->name);
strcpy(name, "sr_"); if (name == NULL) {
strcat(name, sr_info->voltdm->name); ret = -ENOMEM;
goto error;
}
ret = request_irq(sr_info->irq, sr_interrupt, ret = request_irq(sr_info->irq, sr_interrupt,
0, name, (void *)sr_info); 0, name, (void *)sr_info);
if (ret) if (ret)
@ -282,6 +285,7 @@ error:
dev_err(&sr_info->pdev->dev, "%s: ERROR in registering" dev_err(&sr_info->pdev->dev, "%s: ERROR in registering"
"interrupt handler. Smartreflex will" "interrupt handler. Smartreflex will"
"not function as desired\n", __func__); "not function as desired\n", __func__);
kfree(name);
kfree(sr_info); kfree(sr_info);
return ret; return ret;
} }
@ -820,7 +824,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
struct omap_sr *sr_info = kzalloc(sizeof(struct omap_sr), GFP_KERNEL); struct omap_sr *sr_info = kzalloc(sizeof(struct omap_sr), GFP_KERNEL);
struct omap_sr_data *pdata = pdev->dev.platform_data; struct omap_sr_data *pdata = pdev->dev.platform_data;
struct resource *mem, *irq; struct resource *mem, *irq;
struct dentry *vdd_dbg_dir, *dbg_dir, *nvalue_dir; struct dentry *vdd_dbg_dir, *nvalue_dir;
struct omap_volt_data *volt_data; struct omap_volt_data *volt_data;
int i, ret = 0; int i, ret = 0;
@ -879,7 +883,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
ret = sr_late_init(sr_info); ret = sr_late_init(sr_info);
if (ret) { if (ret) {
pr_warning("%s: Error in SR late init\n", __func__); pr_warning("%s: Error in SR late init\n", __func__);
return ret; goto err_release_region;
} }
} }
@ -890,30 +894,34 @@ static int __init omap_sr_probe(struct platform_device *pdev)
* not try to create rest of the debugfs entries. * not try to create rest of the debugfs entries.
*/ */
vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm); vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm);
if (!vdd_dbg_dir) if (!vdd_dbg_dir) {
return -EINVAL; ret = -EINVAL;
goto err_release_region;
dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir);
if (IS_ERR(dbg_dir)) {
dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
__func__);
return PTR_ERR(dbg_dir);
} }
(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir, sr_info->dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir);
(void *)sr_info, &pm_sr_fops); if (IS_ERR(sr_info->dbg_dir)) {
(void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir, dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
__func__);
ret = PTR_ERR(sr_info->dbg_dir);
goto err_release_region;
}
(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR,
sr_info->dbg_dir, (void *)sr_info, &pm_sr_fops);
(void) debugfs_create_x32("errweight", S_IRUGO, sr_info->dbg_dir,
&sr_info->err_weight); &sr_info->err_weight);
(void) debugfs_create_x32("errmaxlimit", S_IRUGO, dbg_dir, (void) debugfs_create_x32("errmaxlimit", S_IRUGO, sr_info->dbg_dir,
&sr_info->err_maxlimit); &sr_info->err_maxlimit);
(void) debugfs_create_x32("errminlimit", S_IRUGO, dbg_dir, (void) debugfs_create_x32("errminlimit", S_IRUGO, sr_info->dbg_dir,
&sr_info->err_minlimit); &sr_info->err_minlimit);
nvalue_dir = debugfs_create_dir("nvalue", dbg_dir); nvalue_dir = debugfs_create_dir("nvalue", sr_info->dbg_dir);
if (IS_ERR(nvalue_dir)) { if (IS_ERR(nvalue_dir)) {
dev_err(&pdev->dev, "%s: Unable to create debugfs directory" dev_err(&pdev->dev, "%s: Unable to create debugfs directory"
"for n-values\n", __func__); "for n-values\n", __func__);
return PTR_ERR(nvalue_dir); ret = PTR_ERR(nvalue_dir);
goto err_release_region;
} }
omap_voltage_get_volttable(sr_info->voltdm, &volt_data); omap_voltage_get_volttable(sr_info->voltdm, &volt_data);
@ -922,23 +930,15 @@ static int __init omap_sr_probe(struct platform_device *pdev)
" corresponding vdd vdd_%s. Cannot create debugfs" " corresponding vdd vdd_%s. Cannot create debugfs"
"entries for n-values\n", "entries for n-values\n",
__func__, sr_info->voltdm->name); __func__, sr_info->voltdm->name);
return -ENODATA; ret = -ENODATA;
goto err_release_region;
} }
for (i = 0; i < sr_info->nvalue_count; i++) { for (i = 0; i < sr_info->nvalue_count; i++) {
char *name; char name[NVALUE_NAME_LEN + 1];
char volt_name[32];
name = kzalloc(NVALUE_NAME_LEN + 1, GFP_KERNEL); snprintf(name, sizeof(name), "volt_%d",
if (!name) { volt_data[i].volt_nominal);
dev_err(&pdev->dev, "%s: Unable to allocate memory"
" for n-value directory name\n", __func__);
return -ENOMEM;
}
strcpy(name, "volt_");
sprintf(volt_name, "%d", volt_data[i].volt_nominal);
strcat(name, volt_name);
(void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir, (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
&(sr_info->nvalue_table[i].nvalue)); &(sr_info->nvalue_table[i].nvalue));
} }
@ -973,6 +973,8 @@ static int __devexit omap_sr_remove(struct platform_device *pdev)
if (sr_info->autocomp_active) if (sr_info->autocomp_active)
sr_stop_vddautocomp(sr_info); sr_stop_vddautocomp(sr_info);
if (sr_info->dbg_dir)
debugfs_remove_recursive(sr_info->dbg_dir);
list_del(&sr_info->node); list_del(&sr_info->node);
iounmap(sr_info->base); iounmap(sr_info->base);

View file

@ -34,6 +34,12 @@
#include "sdrc.h" #include "sdrc.h"
#include "cm2xxx_3xxx.h" #include "cm2xxx_3xxx.h"
/*
 * This file needs to be built unconditionally as ARM to interoperate correctly
* with non-Thumb-2-capable firmware.
*/
.arm
.text .text
/* r1 parameters */ /* r1 parameters */
@ -116,24 +122,36 @@ ENTRY(omap3_sram_configure_core_dpll)
@ pull the extra args off the stack @ pull the extra args off the stack
@ and store them in SRAM @ and store them in SRAM
/*
* PC-relative stores are deprecated in ARMv7 and lead to undefined behaviour
 * in Thumb-2: use r7 as a base instead.
 * Be careful not to clobber r7 when maintaining this file.
*/
THUMB( adr r7, omap3_sram_configure_core_dpll )
.macro strtext Rt:req, label:req
ARM( str \Rt, \label )
THUMB( str \Rt, [r7, \label - omap3_sram_configure_core_dpll] )
.endm
ldr r4, [sp, #52] ldr r4, [sp, #52]
str r4, omap_sdrc_rfr_ctrl_0_val strtext r4, omap_sdrc_rfr_ctrl_0_val
ldr r4, [sp, #56] ldr r4, [sp, #56]
str r4, omap_sdrc_actim_ctrl_a_0_val strtext r4, omap_sdrc_actim_ctrl_a_0_val
ldr r4, [sp, #60] ldr r4, [sp, #60]
str r4, omap_sdrc_actim_ctrl_b_0_val strtext r4, omap_sdrc_actim_ctrl_b_0_val
ldr r4, [sp, #64] ldr r4, [sp, #64]
str r4, omap_sdrc_mr_0_val strtext r4, omap_sdrc_mr_0_val
ldr r4, [sp, #68] ldr r4, [sp, #68]
str r4, omap_sdrc_rfr_ctrl_1_val strtext r4, omap_sdrc_rfr_ctrl_1_val
cmp r4, #0 @ if SDRC_RFR_CTRL_1 is 0, cmp r4, #0 @ if SDRC_RFR_CTRL_1 is 0,
beq skip_cs1_params @ do not use cs1 params beq skip_cs1_params @ do not use cs1 params
ldr r4, [sp, #72] ldr r4, [sp, #72]
str r4, omap_sdrc_actim_ctrl_a_1_val strtext r4, omap_sdrc_actim_ctrl_a_1_val
ldr r4, [sp, #76] ldr r4, [sp, #76]
str r4, omap_sdrc_actim_ctrl_b_1_val strtext r4, omap_sdrc_actim_ctrl_b_1_val
ldr r4, [sp, #80] ldr r4, [sp, #80]
str r4, omap_sdrc_mr_1_val strtext r4, omap_sdrc_mr_1_val
skip_cs1_params: skip_cs1_params:
mrc p15, 0, r8, c1, c0, 0 @ read ctrl register mrc p15, 0, r8, c1, c0, 0 @ read ctrl register
bic r10, r8, #0x800 @ clear Z-bit, disable branch prediction bic r10, r8, #0x800 @ clear Z-bit, disable branch prediction
@ -271,6 +289,7 @@ skip_cs1_prog:
ldr r12, [r11] @ posted-write barrier for SDRC ldr r12, [r11] @ posted-write barrier for SDRC
bx lr bx lr
.align
omap3_sdrc_power: omap3_sdrc_power:
.word OMAP34XX_SDRC_REGADDR(SDRC_POWER) .word OMAP34XX_SDRC_REGADDR(SDRC_POWER)
omap3_cm_clksel1_pll: omap3_cm_clksel1_pll:
@ -319,6 +338,7 @@ omap3_sdrc_dlla_ctrl:
.word OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL) .word OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL)
core_m2_mask_val: core_m2_mask_val:
.word 0x07FFFFFF .word 0x07FFFFFF
ENDPROC(omap3_sram_configure_core_dpll)
ENTRY(omap3_sram_configure_core_dpll_sz) ENTRY(omap3_sram_configure_core_dpll_sz)
.word . - omap3_sram_configure_core_dpll .word . - omap3_sram_configure_core_dpll

View file

@ -347,6 +347,7 @@ static struct platform_device *pxa25x_devices[] __initdata = {
&pxa25x_device_assp, &pxa25x_device_assp,
&pxa25x_device_pwm0, &pxa25x_device_pwm0,
&pxa25x_device_pwm1, &pxa25x_device_pwm1,
&pxa_device_asoc_platform,
}; };
static struct sys_device pxa25x_sysdev[] = { static struct sys_device pxa25x_sysdev[] = {

View file

@ -81,8 +81,6 @@ static int tosa_bt_probe(struct platform_device *dev)
goto err_rfk_alloc; goto err_rfk_alloc;
} }
rfkill_set_led_trigger_name(rfk, "tosa-bt");
rc = rfkill_register(rfk); rc = rfkill_register(rfk);
if (rc) if (rc)
goto err_rfkill; goto err_rfkill;

View file

@ -875,6 +875,11 @@ static struct platform_device sharpsl_rom_device = {
.dev.platform_data = &sharpsl_rom_data, .dev.platform_data = &sharpsl_rom_data,
}; };
static struct platform_device wm9712_device = {
.name = "wm9712-codec",
.id = -1,
};
static struct platform_device *devices[] __initdata = { static struct platform_device *devices[] __initdata = {
&tosascoop_device, &tosascoop_device,
&tosascoop_jc_device, &tosascoop_jc_device,
@ -885,6 +890,7 @@ static struct platform_device *devices[] __initdata = {
&tosaled_device, &tosaled_device,
&tosa_bt_device, &tosa_bt_device,
&sharpsl_rom_device, &sharpsl_rom_device,
&wm9712_device,
}; };
static void tosa_poweroff(void) static void tosa_poweroff(void)

View file

@ -99,6 +99,7 @@ config MACH_NEO1973_GTA02
select POWER_SUPPLY select POWER_SUPPLY
select MACH_NEO1973 select MACH_NEO1973
select S3C2410_PWM select S3C2410_PWM
select S3C_DEV_USB_HOST
help help
Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone

View file

@ -44,19 +44,19 @@
#define GTA02v3_GPIO_nUSB_FLT S3C2410_GPG(10) /* v3 + v4 only */ #define GTA02v3_GPIO_nUSB_FLT S3C2410_GPG(10) /* v3 + v4 only */
#define GTA02v3_GPIO_nGSM_OC S3C2410_GPG(11) /* v3 + v4 only */ #define GTA02v3_GPIO_nGSM_OC S3C2410_GPG(11) /* v3 + v4 only */
#define GTA02_GPIO_AMP_SHUT S3C2440_GPJ1 /* v2 + v3 + v4 only */ #define GTA02_GPIO_AMP_SHUT S3C2410_GPJ(1) /* v2 + v3 + v4 only */
#define GTA02v1_GPIO_WLAN_GPIO10 S3C2440_GPJ2 #define GTA02v1_GPIO_WLAN_GPIO10 S3C2410_GPJ(2)
#define GTA02_GPIO_HP_IN S3C2440_GPJ2 /* v2 + v3 + v4 only */ #define GTA02_GPIO_HP_IN S3C2410_GPJ(2) /* v2 + v3 + v4 only */
#define GTA02_GPIO_INT0 S3C2440_GPJ3 /* v2 + v3 + v4 only */ #define GTA02_GPIO_INT0 S3C2410_GPJ(3) /* v2 + v3 + v4 only */
#define GTA02_GPIO_nGSM_EN S3C2440_GPJ4 #define GTA02_GPIO_nGSM_EN S3C2410_GPJ(4)
#define GTA02_GPIO_3D_RESET S3C2440_GPJ5 #define GTA02_GPIO_3D_RESET S3C2410_GPJ(5)
#define GTA02_GPIO_nDL_GSM S3C2440_GPJ6 /* v4 + v5 only */ #define GTA02_GPIO_nDL_GSM S3C2410_GPJ(6) /* v4 + v5 only */
#define GTA02_GPIO_WLAN_GPIO0 S3C2440_GPJ7 #define GTA02_GPIO_WLAN_GPIO0 S3C2410_GPJ(7)
#define GTA02v1_GPIO_BAT_ID S3C2440_GPJ8 #define GTA02v1_GPIO_BAT_ID S3C2410_GPJ(8)
#define GTA02_GPIO_KEEPACT S3C2440_GPJ8 #define GTA02_GPIO_KEEPACT S3C2410_GPJ(8)
#define GTA02v1_GPIO_HP_IN S3C2440_GPJ10 #define GTA02v1_GPIO_HP_IN S3C2410_GPJ(10)
#define GTA02_CHIP_PWD S3C2440_GPJ11 /* v2 + v3 + v4 only */ #define GTA02_CHIP_PWD S3C2410_GPJ(11) /* v2 + v3 + v4 only */
#define GTA02_GPIO_nWLAN_RESET S3C2440_GPJ12 /* v2 + v3 + v4 only */ #define GTA02_GPIO_nWLAN_RESET S3C2410_GPJ(12) /* v2 + v3 + v4 only */
#define GTA02_IRQ_GSENSOR_1 IRQ_EINT0 #define GTA02_IRQ_GSENSOR_1 IRQ_EINT0
#define GTA02_IRQ_MODEM IRQ_EINT1 #define GTA02_IRQ_MODEM IRQ_EINT1

View file

@ -150,6 +150,12 @@ static struct clk init_clocks_off[] = {
.parent = &clk_p, .parent = &clk_p,
.enable = s3c64xx_pclk_ctrl, .enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_IIC, .ctrlbit = S3C_CLKCON_PCLK_IIC,
}, {
.name = "i2c",
.id = 1,
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C6410_CLKCON_PCLK_I2C1,
}, { }, {
.name = "iis", .name = "iis",
.id = 0, .id = 0,

View file

@ -690,12 +690,12 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
regptr = regs + PL080_Cx_BASE(0); regptr = regs + PL080_Cx_BASE(0);
for (ch = 0; ch < 8; ch++, chno++, chptr++) { for (ch = 0; ch < 8; ch++, chptr++) {
printk(KERN_INFO "%s: registering DMA %d (%p)\n", pr_debug("%s: registering DMA %d (%p)\n",
__func__, chno, regptr); __func__, chno + ch, regptr);
chptr->bit = 1 << ch; chptr->bit = 1 << ch;
chptr->number = chno; chptr->number = chno + ch;
chptr->dmac = dmac; chptr->dmac = dmac;
chptr->regs = regptr; chptr->regs = regptr;
regptr += PL080_Cx_STRIDE; regptr += PL080_Cx_STRIDE;
@ -704,7 +704,8 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
/* for the moment, permanently enable the controller */ /* for the moment, permanently enable the controller */
writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG); writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);
printk(KERN_INFO "PL080: IRQ %d, at %p\n", irq, regs); printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n",
irq, regs, chno, chno+8);
return 0; return 0;

View file

@ -72,7 +72,7 @@ static struct s3c_gpio_cfg gpio_4bit_cfg_eint0011 = {
.get_pull = s3c_gpio_getpull_updown, .get_pull = s3c_gpio_getpull_updown,
}; };
int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin) static int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin)
{ {
return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO; return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO;
} }
@ -138,7 +138,7 @@ static struct s3c_gpio_chip gpio_4bit[] = {
}, },
}; };
int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin) static int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin)
{ {
return pin >= 8 ? IRQ_EINT(16) + pin - 8 : -ENXIO; return pin >= 8 ? IRQ_EINT(16) + pin - 8 : -ENXIO;
} }

View file

@ -28,6 +28,7 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/smsc911x.h> #include <linux/smsc911x.h>
#include <linux/regulator/fixed.h> #include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#ifdef CONFIG_SMDK6410_WM1190_EV1 #ifdef CONFIG_SMDK6410_WM1190_EV1
#include <linux/mfd/wm8350/core.h> #include <linux/mfd/wm8350/core.h>
@ -351,7 +352,7 @@ static struct regulator_init_data smdk6410_vddpll = {
/* VDD_UH_MMC, LDO5 on J5 */ /* VDD_UH_MMC, LDO5 on J5 */
static struct regulator_init_data smdk6410_vdduh_mmc = { static struct regulator_init_data smdk6410_vdduh_mmc = {
.constraints = { .constraints = {
.name = "PVDD_UH/PVDD_MMC", .name = "PVDD_UH+PVDD_MMC",
.always_on = 1, .always_on = 1,
}, },
}; };
@ -417,7 +418,7 @@ static struct regulator_init_data smdk6410_vddaudio = {
/* S3C64xx internal logic & PLL */ /* S3C64xx internal logic & PLL */
static struct regulator_init_data wm8350_dcdc1_data = { static struct regulator_init_data wm8350_dcdc1_data = {
.constraints = { .constraints = {
.name = "PVDD_INT/PVDD_PLL", .name = "PVDD_INT+PVDD_PLL",
.min_uV = 1200000, .min_uV = 1200000,
.max_uV = 1200000, .max_uV = 1200000,
.always_on = 1, .always_on = 1,
@ -452,7 +453,7 @@ static struct regulator_consumer_supply wm8350_dcdc4_consumers[] = {
static struct regulator_init_data wm8350_dcdc4_data = { static struct regulator_init_data wm8350_dcdc4_data = {
.constraints = { .constraints = {
.name = "PVDD_HI/PVDD_EXT/PVDD_SYS/PVCCM2MTV", .name = "PVDD_HI+PVDD_EXT+PVDD_SYS+PVCCM2MTV",
.min_uV = 3000000, .min_uV = 3000000,
.max_uV = 3000000, .max_uV = 3000000,
.always_on = 1, .always_on = 1,
@ -464,7 +465,7 @@ static struct regulator_init_data wm8350_dcdc4_data = {
/* OTGi/1190-EV1 HPVDD & AVDD */ /* OTGi/1190-EV1 HPVDD & AVDD */
static struct regulator_init_data wm8350_ldo4_data = { static struct regulator_init_data wm8350_ldo4_data = {
.constraints = { .constraints = {
.name = "PVDD_OTGI/HPVDD/AVDD", .name = "PVDD_OTGI+HPVDD+AVDD",
.min_uV = 1200000, .min_uV = 1200000,
.max_uV = 1200000, .max_uV = 1200000,
.apply_uV = 1, .apply_uV = 1,
@ -552,7 +553,7 @@ static struct wm831x_backlight_pdata wm1192_backlight_pdata = {
static struct regulator_init_data wm1192_dcdc3 = { static struct regulator_init_data wm1192_dcdc3 = {
.constraints = { .constraints = {
.name = "PVDD_MEM/PVDD_GPS", .name = "PVDD_MEM+PVDD_GPS",
.always_on = 1, .always_on = 1,
}, },
}; };
@ -563,7 +564,7 @@ static struct regulator_consumer_supply wm1192_ldo1_consumers[] = {
static struct regulator_init_data wm1192_ldo1 = { static struct regulator_init_data wm1192_ldo1 = {
.constraints = { .constraints = {
.name = "PVDD_LCD/PVDD_EXT", .name = "PVDD_LCD+PVDD_EXT",
.always_on = 1, .always_on = 1,
}, },
.consumer_supplies = wm1192_ldo1_consumers, .consumer_supplies = wm1192_ldo1_consumers,

View file

@ -17,7 +17,7 @@
void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols) void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)
{ {
/* Set all the necessary GPK pins to special-function 3: KP_ROW[x] */ /* Set all the necessary GPK pins to special-function 3: KP_ROW[x] */
s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), 8 + rows, S3C_GPIO_SFN(3)); s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), rows, S3C_GPIO_SFN(3));
/* Set all the necessary GPL pins to special-function 3: KP_COL[x] */ /* Set all the necessary GPL pins to special-function 3: KP_COL[x] */
s3c_gpio_cfgrange_nopull(S3C64XX_GPL(0), cols, S3C_GPIO_SFN(3)); s3c_gpio_cfgrange_nopull(S3C64XX_GPL(0), cols, S3C_GPIO_SFN(3));

View file

@ -56,7 +56,7 @@ void s3c6400_setup_sdhci_cfg_card(struct platform_device *dev,
else else
ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0); ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0);
printk(KERN_INFO "%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3); pr_debug("%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3);
writel(ctrl2, r + S3C_SDHCI_CONTROL2); writel(ctrl2, r + S3C_SDHCI_CONTROL2);
writel(ctrl3, r + S3C_SDHCI_CONTROL3); writel(ctrl3, r + S3C_SDHCI_CONTROL3);
} }

View file

@ -23,7 +23,7 @@
#define S5P6440_GPIO_A_NR (6) #define S5P6440_GPIO_A_NR (6)
#define S5P6440_GPIO_B_NR (7) #define S5P6440_GPIO_B_NR (7)
#define S5P6440_GPIO_C_NR (8) #define S5P6440_GPIO_C_NR (8)
#define S5P6440_GPIO_F_NR (2) #define S5P6440_GPIO_F_NR (16)
#define S5P6440_GPIO_G_NR (7) #define S5P6440_GPIO_G_NR (7)
#define S5P6440_GPIO_H_NR (10) #define S5P6440_GPIO_H_NR (10)
#define S5P6440_GPIO_I_NR (16) #define S5P6440_GPIO_I_NR (16)
@ -36,7 +36,7 @@
#define S5P6450_GPIO_B_NR (7) #define S5P6450_GPIO_B_NR (7)
#define S5P6450_GPIO_C_NR (8) #define S5P6450_GPIO_C_NR (8)
#define S5P6450_GPIO_D_NR (8) #define S5P6450_GPIO_D_NR (8)
#define S5P6450_GPIO_F_NR (2) #define S5P6450_GPIO_F_NR (16)
#define S5P6450_GPIO_G_NR (14) #define S5P6450_GPIO_G_NR (14)
#define S5P6450_GPIO_H_NR (10) #define S5P6450_GPIO_H_NR (10)
#define S5P6450_GPIO_I_NR (16) #define S5P6450_GPIO_I_NR (16)

View file

@ -454,6 +454,7 @@ static void __init ag5evm_init(void)
gpio_direction_output(GPIO_PORT217, 0); gpio_direction_output(GPIO_PORT217, 0);
mdelay(1); mdelay(1);
gpio_set_value(GPIO_PORT217, 1); gpio_set_value(GPIO_PORT217, 1);
mdelay(100);
/* LCD backlight controller */ /* LCD backlight controller */
gpio_request(GPIO_PORT235, NULL); /* RESET */ gpio_request(GPIO_PORT235, NULL); /* RESET */

View file

@ -1303,7 +1303,7 @@ static void __init ap4evb_init(void)
lcdc_info.clock_source = LCDC_CLK_BUS; lcdc_info.clock_source = LCDC_CLK_BUS;
lcdc_info.ch[0].interface_type = RGB18; lcdc_info.ch[0].interface_type = RGB18;
lcdc_info.ch[0].clock_divider = 2; lcdc_info.ch[0].clock_divider = 3;
lcdc_info.ch[0].flags = 0; lcdc_info.ch[0].flags = 0;
lcdc_info.ch[0].lcd_size_cfg.width = 152; lcdc_info.ch[0].lcd_size_cfg.width = 152;
lcdc_info.ch[0].lcd_size_cfg.height = 91; lcdc_info.ch[0].lcd_size_cfg.height = 91;

View file

@ -303,7 +303,7 @@ static struct sh_mobile_lcdc_info lcdc_info = {
.lcd_cfg = mackerel_lcdc_modes, .lcd_cfg = mackerel_lcdc_modes,
.num_cfg = ARRAY_SIZE(mackerel_lcdc_modes), .num_cfg = ARRAY_SIZE(mackerel_lcdc_modes),
.interface_type = RGB24, .interface_type = RGB24,
.clock_divider = 2, .clock_divider = 3,
.flags = 0, .flags = 0,
.lcd_size_cfg.width = 152, .lcd_size_cfg.width = 152,
.lcd_size_cfg.height = 91, .lcd_size_cfg.height = 91,

View file

@ -263,7 +263,7 @@ static struct clk div6_clks[DIV6_NR] = {
}; };
enum { MSTP001, enum { MSTP001,
MSTP125, MSTP118, MSTP116, MSTP100, MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
MSTP219, MSTP219,
MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
MSTP331, MSTP329, MSTP325, MSTP323, MSTP312, MSTP331, MSTP329, MSTP325, MSTP323, MSTP312,
@ -275,6 +275,10 @@ enum { MSTP001,
static struct clk mstp_clks[MSTP_NR] = { static struct clk mstp_clks[MSTP_NR] = {
[MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */ [MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */
[MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* CEU1 */
[MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* CSI2-RX1 */
[MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU0 */
[MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2-RX0 */
[MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */ [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
[MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */ [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */
[MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */ [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
@ -306,6 +310,9 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("r_clk", &r_clk), CLKDEV_CON_ID("r_clk", &r_clk),
/* DIV6 clocks */ /* DIV6 clocks */
CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]),
CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]), CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]),
CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]), CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]),
CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]), CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
@ -313,11 +320,15 @@ static struct clk_lookup lookups[] = {
/* MSTP32 clocks */ /* MSTP32 clocks */
CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */ CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */
CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP129]), /* CEU1 */
CLKDEV_DEV_ID("sh-mobile-csi2.1", &mstp_clks[MSTP128]), /* CSI2-RX1 */
CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU0 */
CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2-RX0 */
CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */ CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */
CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */ CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */
CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */ CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */ CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */

View file

@ -6,13 +6,10 @@ LIST "RWT Setting"
EW 0xE6020004, 0xA500 EW 0xE6020004, 0xA500
EW 0xE6030004, 0xA500 EW 0xE6030004, 0xA500
DD 0x01001000, 0x01001000
LIST "GPIO Setting" LIST "GPIO Setting"
EB 0xE6051013, 0xA2 EB 0xE6051013, 0xA2
LIST "CPG" LIST "CPG"
ED 0xE6150080, 0x00000180
ED 0xE61500C0, 0x00000002 ED 0xE61500C0, 0x00000002
WAIT 1, 0xFE40009C WAIT 1, 0xFE40009C
@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040
WAIT 1, 0xFE40009C WAIT 1, 0xFE40009C
LIST "SUB/USBClk"
ED 0xE6150080, 0x00000180
LIST "BSC" LIST "BSC"
ED 0xFEC10000, 0x00E0001B ED 0xFEC10000, 0x00E0001B
@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505
ED 0xFE40004C, 0x00110209 ED 0xFE40004C, 0x00110209
ED 0xFE400010, 0x00000087 ED 0xFE400010, 0x00000087
WAIT 10, 0xFE40009C WAIT 30, 0xFE40009C
ED 0xFE400084, 0x0000003F ED 0xFE400084, 0x0000003F
EB 0xFE500000, 0x00 EB 0xFE500000, 0x00
@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050
WAIT 1, 0xFE40009C WAIT 1, 0xFE40009C
ED 0xE6150354, 0x00000002 ED 0xFE400354, 0x01AD8002
LIST "SCIF0 - Serial port for earlyprintk" LIST "SCIF0 - Serial port for earlyprintk"
EB 0xE6053098, 0x11 EB 0xE6053098, 0x11

View file

@ -6,13 +6,10 @@ LIST "RWT Setting"
EW 0xE6020004, 0xA500 EW 0xE6020004, 0xA500
EW 0xE6030004, 0xA500 EW 0xE6030004, 0xA500
DD 0x01001000, 0x01001000
LIST "GPIO Setting" LIST "GPIO Setting"
EB 0xE6051013, 0xA2 EB 0xE6051013, 0xA2
LIST "CPG" LIST "CPG"
ED 0xE6150080, 0x00000180
ED 0xE61500C0, 0x00000002 ED 0xE61500C0, 0x00000002
WAIT 1, 0xFE40009C WAIT 1, 0xFE40009C
@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040
WAIT 1, 0xFE40009C WAIT 1, 0xFE40009C
LIST "SUB/USBClk"
ED 0xE6150080, 0x00000180
LIST "BSC" LIST "BSC"
ED 0xFEC10000, 0x00E0001B ED 0xFEC10000, 0x00E0001B
@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505
ED 0xFE40004C, 0x00110209 ED 0xFE40004C, 0x00110209
ED 0xFE400010, 0x00000087 ED 0xFE400010, 0x00000087
WAIT 10, 0xFE40009C WAIT 30, 0xFE40009C
ED 0xFE400084, 0x0000003F ED 0xFE400084, 0x0000003F
EB 0xFE500000, 0x00 EB 0xFE500000, 0x00
@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050
WAIT 1, 0xFE40009C WAIT 1, 0xFE40009C
ED 0xE6150354, 0x00000002 ED 0xFE400354, 0x01AD8002
LIST "SCIF0 - Serial port for earlyprintk" LIST "SCIF0 - Serial port for earlyprintk"
EB 0xE6053098, 0x11 EB 0xE6053098, 0x11

View file

@ -56,10 +56,6 @@ struct omap_globals {
unsigned long prm; /* Power and Reset Management */ unsigned long prm; /* Power and Reset Management */
unsigned long cm; /* Clock Management */ unsigned long cm; /* Clock Management */
unsigned long cm2; unsigned long cm2;
unsigned long uart1_phys;
unsigned long uart2_phys;
unsigned long uart3_phys;
unsigned long uart4_phys;
}; };
void omap2_set_globals_242x(void); void omap2_set_globals_242x(void);

View file

@ -15,6 +15,8 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <plat/devs.h>
/* uart devices */ /* uart devices */
static struct platform_device s3c24xx_uart_device0 = { static struct platform_device s3c24xx_uart_device0 = {

View file

@ -13,6 +13,8 @@
.align 2 .align 2
ENTRY(_outsl) ENTRY(_outsl)
CC = R2 == 0;
IF CC JUMP 1f;
P0 = R0; /* P0 = port */ P0 = R0; /* P0 = port */
P1 = R1; /* P1 = address */ P1 = R1; /* P1 = address */
P2 = R2; /* P2 = count */ P2 = R2; /* P2 = count */
@ -20,10 +22,12 @@ ENTRY(_outsl)
LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2; LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2;
.Llong_loop_s: R0 = [P1++]; .Llong_loop_s: R0 = [P1++];
.Llong_loop_e: [P0] = R0; .Llong_loop_e: [P0] = R0;
RTS; 1: RTS;
ENDPROC(_outsl) ENDPROC(_outsl)
ENTRY(_outsw) ENTRY(_outsw)
CC = R2 == 0;
IF CC JUMP 1f;
P0 = R0; /* P0 = port */ P0 = R0; /* P0 = port */
P1 = R1; /* P1 = address */ P1 = R1; /* P1 = address */
P2 = R2; /* P2 = count */ P2 = R2; /* P2 = count */
@ -31,10 +35,12 @@ ENTRY(_outsw)
LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2; LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2;
.Lword_loop_s: R0 = W[P1++]; .Lword_loop_s: R0 = W[P1++];
.Lword_loop_e: W[P0] = R0; .Lword_loop_e: W[P0] = R0;
RTS; 1: RTS;
ENDPROC(_outsw) ENDPROC(_outsw)
ENTRY(_outsb) ENTRY(_outsb)
CC = R2 == 0;
IF CC JUMP 1f;
P0 = R0; /* P0 = port */ P0 = R0; /* P0 = port */
P1 = R1; /* P1 = address */ P1 = R1; /* P1 = address */
P2 = R2; /* P2 = count */ P2 = R2; /* P2 = count */
@ -42,10 +48,12 @@ ENTRY(_outsb)
LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2; LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2;
.Lbyte_loop_s: R0 = B[P1++]; .Lbyte_loop_s: R0 = B[P1++];
.Lbyte_loop_e: B[P0] = R0; .Lbyte_loop_e: B[P0] = R0;
RTS; 1: RTS;
ENDPROC(_outsb) ENDPROC(_outsb)
ENTRY(_outsw_8) ENTRY(_outsw_8)
CC = R2 == 0;
IF CC JUMP 1f;
P0 = R0; /* P0 = port */ P0 = R0; /* P0 = port */
P1 = R1; /* P1 = address */ P1 = R1; /* P1 = address */
P2 = R2; /* P2 = count */ P2 = R2; /* P2 = count */
@ -56,5 +64,5 @@ ENTRY(_outsw_8)
R0 = R0 << 8; R0 = R0 << 8;
R0 = R0 + R1; R0 = R0 + R1;
.Lword8_loop_e: W[P0] = R0; .Lword8_loop_e: W[P0] = R0;
RTS; 1: RTS;
ENDPROC(_outsw_8) ENDPROC(_outsw_8)

View file

@ -58,6 +58,8 @@
1: 1:
.ifeqs "\flushins", BROK_FLUSH_INST .ifeqs "\flushins", BROK_FLUSH_INST
\flushins [P0++]; \flushins [P0++];
nop;
nop;
2: nop; 2: nop;
.else .else
2: \flushins [P0++]; 2: \flushins [P0++];

View file

@ -240,6 +240,12 @@ struct machdep_calls {
* claims to support kexec. * claims to support kexec.
*/ */
int (*machine_kexec_prepare)(struct kimage *image); int (*machine_kexec_prepare)(struct kimage *image);
/* Called to perform the _real_ kexec.
* Do NOT allocate memory or fail here. We are past the point of
* no return.
*/
void (*machine_kexec)(struct kimage *image);
#endif /* CONFIG_KEXEC */ #endif /* CONFIG_KEXEC */
#ifdef CONFIG_SUSPEND #ifdef CONFIG_SUSPEND

View file

@ -87,7 +87,10 @@ void machine_kexec(struct kimage *image)
save_ftrace_enabled = __ftrace_enabled_save(); save_ftrace_enabled = __ftrace_enabled_save();
default_machine_kexec(image); if (ppc_md.machine_kexec)
ppc_md.machine_kexec(image);
else
default_machine_kexec(image);
__ftrace_enabled_restore(save_ftrace_enabled); __ftrace_enabled_restore(save_ftrace_enabled);
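
The hunk above, together with the machdep_calls change earlier in this diff, lets a platform supply its own machine_kexec implementation while default_machine_kexec() stays the fallback. As a hedged illustration only (the platform name and quiesce helper below are hypothetical, not part of this merge), a board that must quiet extra hardware before kexec might wire the hook up like this:

#include <linux/kexec.h>
#include <asm/machdep.h>

/* Illustrative only: the myplat_* symbols are assumed platform helpers. */
extern void myplat_quiesce_coprocessor(void);   /* assumed to exist elsewhere */

static void myplat_machine_kexec(struct kimage *image)
{
        /* Past the point of no return: no allocation, no failure paths. */
        myplat_quiesce_coprocessor();
        default_machine_kexec(image);
}

define_machine(myplat) {
        .name           = "myplat",
        /* ...probe, setup_arch and the other usual callbacks... */
        .machine_kexec  = myplat_machine_kexec,
};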

View file

@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread)
prime_debug_regs(new_thread); prime_debug_regs(new_thread);
} }
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */ #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread) static void set_debug_reg_defaults(struct thread_struct *thread)
{ {
if (thread->dabr) { if (thread->dabr) {
@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
set_dabr(0); set_dabr(0);
} }
} }
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */ #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
int set_dabr(unsigned long dabr) int set_dabr(unsigned long dabr)
@ -670,11 +672,11 @@ void flush_thread(void)
{ {
discard_lazy_cpu_state(); discard_lazy_cpu_state();
#ifdef CONFIG_HAVE_HW_BREAKPOINTS #ifdef CONFIG_HAVE_HW_BREAKPOINT
flush_ptrace_hw_breakpoint(current); flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINTS */ #else /* CONFIG_HAVE_HW_BREAKPOINT */
set_debug_reg_defaults(&current->thread); set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINTS */ #endif /* CONFIG_HAVE_HW_BREAKPOINT */
} }
void void

View file

@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
* needs to be flushed. This function will either perform the flush * needs to be flushed. This function will either perform the flush
* immediately or will batch it up if the current CPU has an active * immediately or will batch it up if the current CPU has an active
* batch on it. * batch on it.
*
* Must be called from within some kind of spinlock/non-preempt region...
*/ */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr, void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long pte, int huge) pte_t *ptep, unsigned long pte, int huge)
{ {
struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
unsigned long vsid, vaddr; unsigned long vsid, vaddr;
unsigned int psize; unsigned int psize;
int ssize; int ssize;
@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
*/ */
if (!batch->active) { if (!batch->active) {
flush_hash_page(vaddr, rpte, psize, ssize, 0); flush_hash_page(vaddr, rpte, psize, ssize, 0);
put_cpu_var(ppc64_tlb_batch);
return; return;
} }
@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
batch->index = ++i; batch->index = ++i;
if (i >= PPC64_TLB_BATCH_NR) if (i >= PPC64_TLB_BATCH_NR)
__flush_tlb_pending(batch); __flush_tlb_pending(batch);
put_cpu_var(ppc64_tlb_batch);
} }
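
The hpte_need_flush() hunks above replace __get_cpu_var() with a get_cpu_var()/put_cpu_var() pair, so the per-CPU TLB batch is only touched with preemption disabled and every early return drops the reference. A minimal sketch of that pairing, with purely illustrative names (my_counter, my_add), not the actual mm code:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_counter);

static void my_add(unsigned long delta)
{
        unsigned long *ctr = &get_cpu_var(my_counter);  /* disables preemption */

        if (!delta) {
                put_cpu_var(my_counter);        /* every exit path must drop it */
                return;
        }

        *ctr += delta;
        put_cpu_var(my_counter);                /* re-enables preemption */
}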
/* /*

View file

@ -3,7 +3,7 @@
#include <asm-generic/sections.h> #include <asm-generic/sections.h>
extern void __nosave_begin, __nosave_end; extern long __nosave_begin, __nosave_end;
extern long __machvec_start, __machvec_end; extern long __machvec_start, __machvec_end;
extern char __uncached_start, __uncached_end; extern char __uncached_start, __uncached_end;
extern char _ebss[]; extern char _ebss[];

View file

@ -14,7 +14,7 @@
#include <linux/io.h> #include <linux/io.h>
#include <linux/sh_timer.h> #include <linux/sh_timer.h>
#include <linux/serial_sci.h> #include <linux/serial_sci.h>
#include <asm/machtypes.h> #include <generated/machtypes.h>
static struct resource rtc_resources[] = { static struct resource rtc_resources[] = {
[0] = { [0] = {
@ -255,12 +255,17 @@ static struct platform_device *sh7750_early_devices[] __initdata = {
void __init plat_early_device_setup(void) void __init plat_early_device_setup(void)
{ {
struct platform_device *dev[1];
if (mach_is_rts7751r2d()) { if (mach_is_rts7751r2d()) {
scif_platform_data.scscr |= SCSCR_CKE1; scif_platform_data.scscr |= SCSCR_CKE1;
early_platform_add_devices(&scif_device, 1); dev[0] = &scif_device;
early_platform_add_devices(dev, 1);
} else { } else {
early_platform_add_devices(&sci_device, 1); dev[0] = &sci_device;
early_platform_add_devices(&scif_device, 1); early_platform_add_devices(dev, 1);
dev[0] = &scif_device;
early_platform_add_devices(dev, 1);
} }
early_platform_add_devices(sh7750_early_devices, early_platform_add_devices(sh7750_early_devices,

View file

@ -10,6 +10,16 @@
void __delay(unsigned long loops) void __delay(unsigned long loops)
{ {
__asm__ __volatile__( __asm__ __volatile__(
/*
* ST40-300 appears to have an issue with this code,
* normally taking two cycles each loop, as with all
* other SH variants. If however the branch and the
* delay slot straddle an 8 byte boundary, this increases
* to 3 cycles.
* This align directive ensures this doesn't occur.
*/
".balign 8\n\t"
"tst %0, %0\n\t" "tst %0, %0\n\t"
"1:\t" "1:\t"
"bf/s 1b\n\t" "bf/s 1b\n\t"

View file

@ -108,7 +108,8 @@ void copy_user_highpage(struct page *to, struct page *from,
kunmap_atomic(vfrom, KM_USER0); kunmap_atomic(vfrom, KM_USER0);
} }
if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
(vma->vm_flags & VM_EXEC))
__flush_purge_region(vto, PAGE_SIZE); __flush_purge_region(vto, PAGE_SIZE);
kunmap_atomic(vto, KM_USER1); kunmap_atomic(vto, KM_USER1);

View file

@ -36,6 +36,11 @@
#define MSR_IA32_PERFCTR1 0x000000c2 #define MSR_IA32_PERFCTR1 0x000000c2
#define MSR_FSB_FREQ 0x000000cd #define MSR_FSB_FREQ 0x000000cd
#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
#define NHM_C3_AUTO_DEMOTE (1UL << 25)
#define NHM_C1_AUTO_DEMOTE (1UL << 26)
#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25)
#define MSR_MTRRcap 0x000000fe #define MSR_MTRRcap 0x000000fe
#define MSR_IA32_BBL_CR_CTL 0x00000119 #define MSR_IA32_BBL_CR_CTL 0x00000119

View file

@ -158,9 +158,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{ {
if (c->x86 == 0x06) { if (c->x86 == 0x06) {
if (cpu_has(c, X86_FEATURE_EST)) if (cpu_has(c, X86_FEATURE_EST))
printk(KERN_WARNING PFX "Warning: EST-capable CPU " printk_once(KERN_WARNING PFX "Warning: EST-capable "
"detected. The acpi-cpufreq module offers " "CPU detected. The acpi-cpufreq module offers "
"voltage scaling in addition of frequency " "voltage scaling in addition to frequency "
"scaling. You should use that instead of " "scaling. You should use that instead of "
"p4-clockmod, if possible.\n"); "p4-clockmod, if possible.\n");
switch (c->x86_model) { switch (c->x86_model) {

View file

@ -1537,6 +1537,7 @@ static struct notifier_block cpb_nb = {
static int __cpuinit powernowk8_init(void) static int __cpuinit powernowk8_init(void)
{ {
unsigned int i, supported_cpus = 0, cpu; unsigned int i, supported_cpus = 0, cpu;
int rv;
for_each_online_cpu(i) { for_each_online_cpu(i) {
int rc; int rc;
@ -1555,14 +1556,14 @@ static int __cpuinit powernowk8_init(void)
cpb_capable = true; cpb_capable = true;
register_cpu_notifier(&cpb_nb);
msrs = msrs_alloc(); msrs = msrs_alloc();
if (!msrs) { if (!msrs) {
printk(KERN_ERR "%s: Error allocating msrs!\n", __func__); printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
return -ENOMEM; return -ENOMEM;
} }
register_cpu_notifier(&cpb_nb);
rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
for_each_cpu(cpu, cpu_online_mask) { for_each_cpu(cpu, cpu_online_mask) {
@ -1574,7 +1575,13 @@ static int __cpuinit powernowk8_init(void)
(cpb_enabled ? "on" : "off")); (cpb_enabled ? "on" : "off"));
} }
return cpufreq_register_driver(&cpufreq_amd64_driver); rv = cpufreq_register_driver(&cpufreq_amd64_driver);
if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
unregister_cpu_notifier(&cpb_nb);
msrs_free(msrs);
msrs = NULL;
}
return rv;
} }
/* driver entry point for term */ /* driver entry point for term */

View file

@ -140,8 +140,7 @@ void * __init prom_early_alloc(unsigned long size)
* wasted bootmem) and hand off chunks of it to callers. * wasted bootmem) and hand off chunks of it to callers.
*/ */
res = alloc_bootmem(chunk_size); res = alloc_bootmem(chunk_size);
if (!res) BUG_ON(!res);
return NULL;
prom_early_allocated += chunk_size; prom_early_allocated += chunk_size;
memset(res, 0, chunk_size); memset(res, 0, chunk_size);
free_mem = chunk_size; free_mem = chunk_size;

View file

@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
WARN_ON(!irqs_disabled()); WARN_ON(!irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q); queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q); __blk_run_queue(q, false);
} }
EXPORT_SYMBOL(blk_start_queue); EXPORT_SYMBOL(blk_start_queue);
@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue);
/** /**
* __blk_run_queue - run a single device queue * __blk_run_queue - run a single device queue
* @q: The queue to run * @q: The queue to run
* @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
* *
* Description: * Description:
* See @blk_run_queue. This variant must be called with the queue lock * See @blk_run_queue. This variant must be called with the queue lock
* held and interrupts disabled. * held and interrupts disabled.
* *
*/ */
void __blk_run_queue(struct request_queue *q) void __blk_run_queue(struct request_queue *q, bool force_kblockd)
{ {
blk_remove_plug(q); blk_remove_plug(q);
@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q)
* Only recurse once to avoid overrunning the stack, let the unplug * Only recurse once to avoid overrunning the stack, let the unplug
* handling reinvoke the handler shortly if we already got there. * handling reinvoke the handler shortly if we already got there.
*/ */
if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
q->request_fn(q); q->request_fn(q);
queue_flag_clear(QUEUE_FLAG_REENTER, q); queue_flag_clear(QUEUE_FLAG_REENTER, q);
} else { } else {
@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q)
unsigned long flags; unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags); spin_lock_irqsave(q->queue_lock, flags);
__blk_run_queue(q); __blk_run_queue(q, false);
spin_unlock_irqrestore(q->queue_lock, flags); spin_unlock_irqrestore(q->queue_lock, flags);
} }
EXPORT_SYMBOL(blk_run_queue); EXPORT_SYMBOL(blk_run_queue);
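
With the new force_kblockd argument, a caller sitting in a request completion path can ask that ->request_fn be run only from kblockd rather than recursively. A hedged sketch of such a caller, assuming a hypothetical driver structure (my_dev and my_complete are not part of this merge):

#include <linux/blkdev.h>
#include <linux/spinlock.h>

struct my_dev {
        struct request_queue *queue;
};

static void my_complete(struct my_dev *dev)
{
        struct request_queue *q = dev->queue;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        /* ...finish the current request... */
        __blk_run_queue(q, true);       /* defer ->request_fn to kblockd */
        spin_unlock_irqrestore(q->queue_lock, flags);
}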
@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
drive_stat_acct(rq, 1); drive_stat_acct(rq, 1);
__elv_add_request(q, rq, where, 0); __elv_add_request(q, rq, where, 0);
__blk_run_queue(q); __blk_run_queue(q, false);
spin_unlock_irqrestore(q->queue_lock, flags); spin_unlock_irqrestore(q->queue_lock, flags);
} }
EXPORT_SYMBOL(blk_insert_request); EXPORT_SYMBOL(blk_insert_request);
@ -2610,13 +2611,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
} }
EXPORT_SYMBOL(kblockd_schedule_work); EXPORT_SYMBOL(kblockd_schedule_work);
int kblockd_schedule_delayed_work(struct request_queue *q,
struct delayed_work *dwork, unsigned long delay)
{
return queue_delayed_work(kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_schedule_delayed_work);
int __init blk_dev_init(void) int __init blk_dev_init(void)
{ {
BUILD_BUG_ON(__REQ_NR_BITS > 8 * BUILD_BUG_ON(__REQ_NR_BITS > 8 *

View file

@ -66,10 +66,12 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,
/* /*
* Moving a request silently to empty queue_head may stall the * Moving a request silently to empty queue_head may stall the
* queue. Kick the queue in those cases. * queue. Kick the queue in those cases. This function is called
* from request completion path and calling directly into
* request_fn may confuse the driver. Always use kblockd.
*/ */
if (was_empty && next_rq) if (was_empty && next_rq)
__blk_run_queue(q); __blk_run_queue(q, true);
} }
static void pre_flush_end_io(struct request *rq, int error) static void pre_flush_end_io(struct request *rq, int error)
@ -130,7 +132,7 @@ static struct request *queue_next_fseq(struct request_queue *q)
BUG(); BUG();
} }
elv_insert(q, rq, ELEVATOR_INSERT_FRONT); elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
return rq; return rq;
} }

View file

@ -132,7 +132,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
} }
/** /**
* blkdev_issue_zeroout generate number of zero-filled write bios * blkdev_issue_zeroout - generate number of zero-filled write bios
* @bdev: blockdev to issue * @bdev: blockdev to issue
* @sector: start sector * @sector: start sector
* @nr_sects: number of sectors to write * @nr_sects: number of sectors to write

View file

@ -20,6 +20,11 @@ static int throtl_quantum = 32;
/* Throttling is performed over 100ms slice and after that slice is renewed */ /* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10; /* 100 ms */ static unsigned long throtl_slice = HZ/10; /* 100 ms */
/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
unsigned long delay);
struct throtl_rb_root { struct throtl_rb_root {
struct rb_root rb; struct rb_root rb;
struct rb_node *left; struct rb_node *left;
@ -345,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
update_min_dispatch_time(st); update_min_dispatch_time(st);
if (time_before_eq(st->min_disptime, jiffies)) if (time_before_eq(st->min_disptime, jiffies))
throtl_schedule_delayed_work(td->queue, 0); throtl_schedule_delayed_work(td, 0);
else else
throtl_schedule_delayed_work(td->queue, throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
(st->min_disptime - jiffies));
} }
static inline void static inline void
@ -815,10 +819,10 @@ void blk_throtl_work(struct work_struct *work)
} }
/* Call with queue lock held */ /* Call with queue lock held */
void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{ {
struct throtl_data *td = q->td;
struct delayed_work *dwork = &td->throtl_work; struct delayed_work *dwork = &td->throtl_work;
if (total_nr_queued(td) > 0) { if (total_nr_queued(td) > 0) {
@ -827,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
* Cancel that and schedule a new one. * Cancel that and schedule a new one.
*/ */
__cancel_delayed_work(dwork); __cancel_delayed_work(dwork);
kblockd_schedule_delayed_work(q, dwork, delay); queue_delayed_work(kthrotld_workqueue, dwork, delay);
throtl_log(td, "schedule work. delay=%lu jiffies=%lu", throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
delay, jiffies); delay, jiffies);
} }
} }
EXPORT_SYMBOL(throtl_schedule_delayed_work);
static void static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg) throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
@ -920,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key,
smp_mb__after_atomic_inc(); smp_mb__after_atomic_inc();
/* Schedule a work now to process the limit change */ /* Schedule a work now to process the limit change */
throtl_schedule_delayed_work(td->queue, 0); throtl_schedule_delayed_work(td, 0);
} }
static void throtl_update_blkio_group_write_bps(void *key, static void throtl_update_blkio_group_write_bps(void *key,
@ -934,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key,
smp_mb__before_atomic_inc(); smp_mb__before_atomic_inc();
atomic_inc(&td->limits_changed); atomic_inc(&td->limits_changed);
smp_mb__after_atomic_inc(); smp_mb__after_atomic_inc();
throtl_schedule_delayed_work(td->queue, 0); throtl_schedule_delayed_work(td, 0);
} }
static void throtl_update_blkio_group_read_iops(void *key, static void throtl_update_blkio_group_read_iops(void *key,
@ -948,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key,
smp_mb__before_atomic_inc(); smp_mb__before_atomic_inc();
atomic_inc(&td->limits_changed); atomic_inc(&td->limits_changed);
smp_mb__after_atomic_inc(); smp_mb__after_atomic_inc();
throtl_schedule_delayed_work(td->queue, 0); throtl_schedule_delayed_work(td, 0);
} }
static void throtl_update_blkio_group_write_iops(void *key, static void throtl_update_blkio_group_write_iops(void *key,
@ -962,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
smp_mb__before_atomic_inc(); smp_mb__before_atomic_inc();
atomic_inc(&td->limits_changed); atomic_inc(&td->limits_changed);
smp_mb__after_atomic_inc(); smp_mb__after_atomic_inc();
throtl_schedule_delayed_work(td->queue, 0); throtl_schedule_delayed_work(td, 0);
} }
void throtl_shutdown_timer_wq(struct request_queue *q) void throtl_shutdown_timer_wq(struct request_queue *q)
@ -1135,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q)
static int __init throtl_init(void) static int __init throtl_init(void)
{ {
kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
if (!kthrotld_workqueue)
panic("Failed to create kthrotld\n");
blkio_policy_register(&blkio_policy_throtl); blkio_policy_register(&blkio_policy_throtl);
return 0; return 0;
} }
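
The throttling code now owns a dedicated WQ_MEM_RECLAIM workqueue instead of borrowing kblockd's delayed-work helper. A hedged sketch of that alloc_workqueue()/queue_delayed_work() pattern with illustrative names (my_wq, my_work_fn), not the throttle code itself:

#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static void my_work_fn(struct work_struct *work)
{
        /* runs in process context on the dedicated workqueue */
}

static DECLARE_DELAYED_WORK(my_work, my_work_fn);

static int __init my_init(void)
{
        /* WQ_MEM_RECLAIM guarantees forward progress under memory pressure */
        my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
        if (!my_wq)
                return -ENOMEM;

        queue_delayed_work(my_wq, &my_work, HZ / 10);   /* kick it in ~100ms */
        return 0;
}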

View file

@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqd->busy_queues > 1) { cfqd->busy_queues > 1) {
cfq_del_timer(cfqd, cfqq); cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq); cfq_clear_cfqq_wait_request(cfqq);
__blk_run_queue(cfqd->queue); __blk_run_queue(cfqd->queue, false);
} else { } else {
cfq_blkiocg_update_idle_time_stats( cfq_blkiocg_update_idle_time_stats(
&cfqq->cfqg->blkg); &cfqq->cfqg->blkg);
@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* this new queue is RT and the current one is BE * this new queue is RT and the current one is BE
*/ */
cfq_preempt_queue(cfqd, cfqq); cfq_preempt_queue(cfqd, cfqq);
__blk_run_queue(cfqd->queue); __blk_run_queue(cfqd->queue, false);
} }
} }
@ -3731,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work)
struct request_queue *q = cfqd->queue; struct request_queue *q = cfqd->queue;
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
__blk_run_queue(cfqd->queue); __blk_run_queue(cfqd->queue, false);
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
} }

View file

@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
*/ */
elv_drain_elevator(q); elv_drain_elevator(q);
while (q->rq.elvpriv) { while (q->rq.elvpriv) {
__blk_run_queue(q); __blk_run_queue(q, false);
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
msleep(10); msleep(10);
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
* with anything. There's no point in delaying queue * with anything. There's no point in delaying queue
* processing. * processing.
*/ */
__blk_run_queue(q); __blk_run_queue(q, false);
break; break;
case ELEVATOR_INSERT_SORT: case ELEVATOR_INSERT_SORT:

View file

@ -416,10 +416,15 @@ struct acpi_gpe_handler_info {
u8 originally_enabled; /* True if GPE was originally enabled */ u8 originally_enabled; /* True if GPE was originally enabled */
}; };
struct acpi_gpe_notify_object {
struct acpi_namespace_node *node;
struct acpi_gpe_notify_object *next;
};
union acpi_gpe_dispatch_info { union acpi_gpe_dispatch_info {
struct acpi_namespace_node *method_node; /* Method node for this GPE level */ struct acpi_namespace_node *method_node; /* Method node for this GPE level */
struct acpi_gpe_handler_info *handler; /* Installed GPE handler */ struct acpi_gpe_handler_info *handler; /* Installed GPE handler */
struct acpi_namespace_node *device_node; /* Parent _PRW device for implicit notify */ struct acpi_gpe_notify_object device; /* List of _PRW devices for implicit notify */
}; };
/* /*

View file

@ -457,6 +457,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
acpi_status status; acpi_status status;
struct acpi_gpe_event_info *local_gpe_event_info; struct acpi_gpe_event_info *local_gpe_event_info;
struct acpi_evaluate_info *info; struct acpi_evaluate_info *info;
struct acpi_gpe_notify_object *notify_object;
ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
@ -508,10 +509,18 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
* from this thread -- because handlers may in turn run other * from this thread -- because handlers may in turn run other
* control methods. * control methods.
*/ */
status = status = acpi_ev_queue_notify_request(
acpi_ev_queue_notify_request(local_gpe_event_info->dispatch. local_gpe_event_info->dispatch.device.node,
device_node, ACPI_NOTIFY_DEVICE_WAKE);
ACPI_NOTIFY_DEVICE_WAKE);
notify_object = local_gpe_event_info->dispatch.device.next;
while (ACPI_SUCCESS(status) && notify_object) {
status = acpi_ev_queue_notify_request(
notify_object->node,
ACPI_NOTIFY_DEVICE_WAKE);
notify_object = notify_object->next;
}
break; break;
case ACPI_GPE_DISPATCH_METHOD: case ACPI_GPE_DISPATCH_METHOD:

View file

@ -198,7 +198,9 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
acpi_status status = AE_BAD_PARAMETER; acpi_status status = AE_BAD_PARAMETER;
struct acpi_gpe_event_info *gpe_event_info; struct acpi_gpe_event_info *gpe_event_info;
struct acpi_namespace_node *device_node; struct acpi_namespace_node *device_node;
struct acpi_gpe_notify_object *notify_object;
acpi_cpu_flags flags; acpi_cpu_flags flags;
u8 gpe_dispatch_mask;
ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake); ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
@ -221,27 +223,49 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
goto unlock_and_exit; goto unlock_and_exit;
} }
if (wake_device == ACPI_ROOT_OBJECT) {
goto out;
}
/* /*
* If there is no method or handler for this GPE, then the * If there is no method or handler for this GPE, then the
* wake_device will be notified whenever this GPE fires (aka * wake_device will be notified whenever this GPE fires (aka
* "implicit notify") Note: The GPE is assumed to be * "implicit notify") Note: The GPE is assumed to be
* level-triggered (for windows compatibility). * level-triggered (for windows compatibility).
*/ */
if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK;
ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) { if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE
&& gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) {
/* Validate wake_device is of type Device */ goto out;
device_node = ACPI_CAST_PTR(struct acpi_namespace_node,
wake_device);
if (device_node->type != ACPI_TYPE_DEVICE) {
goto unlock_and_exit;
}
gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
ACPI_GPE_LEVEL_TRIGGERED);
gpe_event_info->dispatch.device_node = device_node;
} }
/* Validate wake_device is of type Device */
device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
if (device_node->type != ACPI_TYPE_DEVICE) {
goto unlock_and_exit;
}
if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) {
gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
ACPI_GPE_LEVEL_TRIGGERED);
gpe_event_info->dispatch.device.node = device_node;
gpe_event_info->dispatch.device.next = NULL;
} else {
/* There are multiple devices to notify implicitly. */
notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object));
if (!notify_object) {
status = AE_NO_MEMORY;
goto unlock_and_exit;
}
notify_object->node = device_node;
notify_object->next = gpe_event_info->dispatch.device.next;
gpe_event_info->dispatch.device.next = notify_object;
}
out:
gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
status = AE_OK; status = AE_OK;

View file

@ -26,7 +26,9 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
static char *buf; static char *buf;
static int uncopied_bytes; static u32 max_size;
static u32 uncopied_bytes;
struct acpi_table_header table; struct acpi_table_header table;
acpi_status status; acpi_status status;
@ -37,19 +39,24 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
if (copy_from_user(&table, user_buf, if (copy_from_user(&table, user_buf,
sizeof(struct acpi_table_header))) sizeof(struct acpi_table_header)))
return -EFAULT; return -EFAULT;
uncopied_bytes = table.length; uncopied_bytes = max_size = table.length;
buf = kzalloc(uncopied_bytes, GFP_KERNEL); buf = kzalloc(max_size, GFP_KERNEL);
if (!buf) if (!buf)
return -ENOMEM; return -ENOMEM;
} }
if (uncopied_bytes < count) { if (buf == NULL)
kfree(buf); return -EINVAL;
if ((*ppos > max_size) ||
(*ppos + count > max_size) ||
(*ppos + count < count) ||
(count > uncopied_bytes))
return -EINVAL; return -EINVAL;
}
if (copy_from_user(buf + (*ppos), user_buf, count)) { if (copy_from_user(buf + (*ppos), user_buf, count)) {
kfree(buf); kfree(buf);
buf = NULL;
return -EFAULT; return -EFAULT;
} }
@ -59,6 +66,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
if (!uncopied_bytes) { if (!uncopied_bytes) {
status = acpi_install_method(buf); status = acpi_install_method(buf);
kfree(buf); kfree(buf);
buf = NULL;
if (ACPI_FAILURE(status)) if (ACPI_FAILURE(status))
return -EINVAL; return -EINVAL;
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE); add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);

View file

@ -78,7 +78,6 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
static DEFINE_MUTEX(loop_mutex);
static LIST_HEAD(loop_devices); static LIST_HEAD(loop_devices);
static DEFINE_MUTEX(loop_devices_mutex); static DEFINE_MUTEX(loop_devices_mutex);
@ -1501,11 +1500,9 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
{ {
struct loop_device *lo = bdev->bd_disk->private_data; struct loop_device *lo = bdev->bd_disk->private_data;
mutex_lock(&loop_mutex);
mutex_lock(&lo->lo_ctl_mutex); mutex_lock(&lo->lo_ctl_mutex);
lo->lo_refcnt++; lo->lo_refcnt++;
mutex_unlock(&lo->lo_ctl_mutex); mutex_unlock(&lo->lo_ctl_mutex);
mutex_unlock(&loop_mutex);
return 0; return 0;
} }
@ -1515,7 +1512,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
struct loop_device *lo = disk->private_data; struct loop_device *lo = disk->private_data;
int err; int err;
mutex_lock(&loop_mutex);
mutex_lock(&lo->lo_ctl_mutex); mutex_lock(&lo->lo_ctl_mutex);
if (--lo->lo_refcnt) if (--lo->lo_refcnt)
@ -1540,7 +1536,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
out: out:
mutex_unlock(&lo->lo_ctl_mutex); mutex_unlock(&lo->lo_ctl_mutex);
out_unlocked: out_unlocked:
mutex_unlock(&loop_mutex);
return 0; return 0;
} }

View file

@ -388,6 +388,10 @@ static void discard_port_data(struct port *port)
unsigned int len; unsigned int len;
int ret; int ret;
if (!port->portdev) {
/* Device has been unplugged. vqs are already gone. */
return;
}
vq = port->in_vq; vq = port->in_vq;
if (port->inbuf) if (port->inbuf)
buf = port->inbuf; buf = port->inbuf;
@ -470,6 +474,10 @@ static void reclaim_consumed_buffers(struct port *port)
void *buf; void *buf;
unsigned int len; unsigned int len;
if (!port->portdev) {
/* Device has been unplugged. vqs are already gone. */
return;
}
while ((buf = virtqueue_get_buf(port->out_vq, &len))) { while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
kfree(buf); kfree(buf);
port->outvq_full = false; port->outvq_full = false;

View file

@ -1919,8 +1919,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
ret = sysdev_driver_register(&cpu_sysdev_class, ret = sysdev_driver_register(&cpu_sysdev_class,
&cpufreq_sysdev_driver); &cpufreq_sysdev_driver);
if (ret)
goto err_null_driver;
if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) { if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
int i; int i;
ret = -ENODEV; ret = -ENODEV;
@ -1935,21 +1937,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (ret) { if (ret) {
dprintk("no CPU initialized for driver %s\n", dprintk("no CPU initialized for driver %s\n",
driver_data->name); driver_data->name);
sysdev_driver_unregister(&cpu_sysdev_class, goto err_sysdev_unreg;
&cpufreq_sysdev_driver);
spin_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
} }
} }
if (!ret) { register_hotcpu_notifier(&cpufreq_cpu_notifier);
register_hotcpu_notifier(&cpufreq_cpu_notifier); dprintk("driver %s up and running\n", driver_data->name);
dprintk("driver %s up and running\n", driver_data->name); cpufreq_debug_enable_ratelimit();
cpufreq_debug_enable_ratelimit();
}
return 0;
err_sysdev_unreg:
sysdev_driver_unregister(&cpu_sysdev_class,
&cpufreq_sysdev_driver);
err_null_driver:
spin_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(cpufreq_register_driver); EXPORT_SYMBOL_GPL(cpufreq_register_driver);
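
The cpufreq_register_driver() hunks above convert the ad-hoc cleanup into the usual goto-based error ladder, so each failure point releases exactly what was set up before it. A generic, hedged sketch of that ladder; the my_* helpers are illustrative and not cpufreq API:

/* Assumed helpers for the sketch: each setup step returns 0 on success. */
static int my_setup_a(void), my_setup_b(void), my_final_step(void);
static void my_undo_a(void), my_undo_b(void);

static int my_register(void)
{
        int ret;

        ret = my_setup_a();
        if (ret)
                goto err_out;

        ret = my_setup_b();
        if (ret)
                goto err_undo_a;        /* unwind in reverse order */

        ret = my_final_step();
        if (ret)
                goto err_undo_b;

        return 0;

err_undo_b:
        my_undo_b();
err_undo_a:
        my_undo_a();
err_out:
        return ret;
}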

View file

@ -672,7 +672,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
struct drm_crtc_helper_funcs *crtc_funcs; struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp; u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc; struct drm_crtc *crtc;
int i, rc = 0; int i, j, rc = 0;
int start; int start;
for (i = 0; i < fb_helper->crtc_count; i++) { for (i = 0; i < fb_helper->crtc_count; i++) {
@ -685,7 +685,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
transp = cmap->transp; transp = cmap->transp;
start = cmap->start; start = cmap->start;
for (i = 0; i < cmap->len; i++) { for (j = 0; j < cmap->len; j++) {
u16 hred, hgreen, hblue, htransp = 0xffff; u16 hred, hgreen, hblue, htransp = 0xffff;
hred = *red++; hred = *red++;

Some files were not shown because too many files have changed in this diff.