Linux 3.18
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJUhNLZAAoJEHm+PkMAQRiGAEcH/iclYDW7k2GKemMqboy+Ohmh
+ELbQothNhlGZlS1wWdD69LBiiXkkQ+ufVYFh/hC0oy0gUdfPMt5t+bOHy6cjn6w
9zOcACtpDKnqbOwRqXZjZgNmIabk7lRjbn7GK4GQqpIaW4oO0FWcT91FFhtGSPDa
tjtmGRqDmbNsqfzr18h0WPEpUZmT6MxIdv17AYDliPB1MaaRuAv1Kss05TJrXdfL
Oucv+C0uwnybD9UWAz6pLJ3H/HR9VJFdkaJ4Y0pbCHAuxdd1+swoTpicluHlsJA1
EkK5iWQRMpcmGwKvB0unCAQljNpaJiq4/Tlmmv8JlYpMlmIiVLT0D8BZx5q05QQ=
=oGNw
-----END PGP SIGNATURE-----

Merge tag 'v3.18' into drm-next

Linux 3.18

Backmerge Linus tree into -next as we had conflicts in i915/radeon/nouveau,
and everyone was solving them individually.

* tag 'v3.18': (57 commits)
  Linux 3.18
  watchdog: s3c2410_wdt: Fix the mask bit offset for Exynos7
  uapi: fix to export linux/vm_sockets.h
  i2c: cadence: Set the hardware time-out register to maximum value
  i2c: davinci: generate STP always when NACK is received
  ahci: disable MSI on SAMSUNG 0xa800 SSD
  context_tracking: Restore previous state in schedule_user
  slab: fix nodeid bounds check for non-contiguous node IDs
  lib/genalloc.c: export devm_gen_pool_create() for modules
  mm: fix anon_vma_clone() error treatment
  mm: fix swapoff hang after page migration and fork
  fat: fix oops on corrupted vfat fs
  ipc/sem.c: fully initialize sem_array before making it visible
  drivers/input/evdev.c: don't kfree() a vmalloc address
  cxgb4: Fill in supported link mode for SFP modules
  xen-netfront: Remove BUGs on paged skb data which crosses a page boundary
  mm/vmpressure.c: fix race in vmpressure_work_fn()
  mm: frontswap: invalidate expired data on a dup-store failure
  mm: do not overwrite reserved pages counter at show_mem()
  drm/radeon: kernel panic in drm_calc_vbltimestamp_from_scanoutpos with 3.18.0-rc6
  ...

Conflicts:
    drivers/gpu/drm/i915/intel_display.c
    drivers/gpu/drm/nouveau/nouveau_drm.c
    drivers/gpu/drm/radeon/radeon_cs.c
commit 8c86394470
60 changed files with 356 additions and 268 deletions
MAINTAINERS | 38

@@ -1838,7 +1838,7 @@ F: include/net/ax25.h
 F: net/ax25/

 AZ6007 DVB DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L: linux-media@vger.kernel.org
 W: http://linuxtv.org
 T: git git://linuxtv.org/media_tree.git
@@ -2208,7 +2208,7 @@ F: Documentation/filesystems/btrfs.txt
 F: fs/btrfs/

 BTTV VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L: linux-media@vger.kernel.org
 W: http://linuxtv.org
 T: git git://linuxtv.org/media_tree.git
@@ -2729,7 +2729,7 @@ F: drivers/media/common/cx2341x*
 F: include/media/cx2341x*

 CX88 VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L: linux-media@vger.kernel.org
 W: http://linuxtv.org
 T: git git://linuxtv.org/media_tree.git
@@ -3419,7 +3419,7 @@ F: fs/ecryptfs/
 EDAC-CORE
 M: Doug Thompson <dougthompson@xmission.com>
 M: Borislav Petkov <bp@alien8.de>
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L: linux-edac@vger.kernel.org
 W: bluesmoke.sourceforge.net
 S: Supported
@@ -3468,7 +3468,7 @@ S: Maintained
 F: drivers/edac/e7xxx_edac.c

 EDAC-GHES
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L: linux-edac@vger.kernel.org
 W: bluesmoke.sourceforge.net
 S: Maintained
@@ -3496,21 +3496,21 @@ S: Maintained
 F: drivers/edac/i5000_edac.c

 EDAC-I5400
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L: linux-edac@vger.kernel.org
 W: bluesmoke.sourceforge.net
 S: Maintained
 F: drivers/edac/i5400_edac.c

 EDAC-I7300
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L: linux-edac@vger.kernel.org
 W: bluesmoke.sourceforge.net
 S: Maintained
 F: drivers/edac/i7300_edac.c

 EDAC-I7CORE
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L: linux-edac@vger.kernel.org
 W: bluesmoke.sourceforge.net
 S: Maintained
@@ -3553,7 +3553,7 @@ S: Maintained
 F: drivers/edac/r82600_edac.c

 EDAC-SBRIDGE
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L: linux-edac@vger.kernel.org
 W: bluesmoke.sourceforge.net
 S: Maintained
@@ -3613,7 +3613,7 @@ S: Maintained
 F: drivers/net/ethernet/ibm/ehea/

 EM28XX VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L: linux-media@vger.kernel.org
 W: http://linuxtv.org
 T: git git://linuxtv.org/media_tree.git
@@ -5979,7 +5979,7 @@ S: Maintained
 F: drivers/media/radio/radio-maxiradio*

 MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 P: LinuxTV.org Project
 L: linux-media@vger.kernel.org
 W: http://linuxtv.org
@@ -8030,7 +8030,7 @@ S: Odd Fixes
 F: drivers/media/i2c/saa6588*

 SAA7134 VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L: linux-media@vger.kernel.org
 W: http://linuxtv.org
 T: git git://linuxtv.org/media_tree.git
@@ -8488,7 +8488,7 @@ S: Maintained
 F: drivers/media/radio/si4713/radio-usb-si4713.c

 SIANO DVB DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L: linux-media@vger.kernel.org
 W: http://linuxtv.org
 T: git git://linuxtv.org/media_tree.git
@@ -8699,7 +8699,9 @@ S: Maintained
 F: drivers/leds/leds-net48xx.c

 SOFTLOGIC 6x10 MPEG CODEC
-M: Ismael Luceno <ismael.luceno@corp.bluecherry.net>
+M: Bluecherry Maintainers <maintainers@bluecherrydvr.com>
+M: Andrey Utkin <andrey.utkin@corp.bluecherry.net>
+M: Andrey Utkin <andrey.krieger.utkin@gmail.com>
 L: linux-media@vger.kernel.org
 S: Supported
 F: drivers/media/pci/solo6x10/
@@ -9173,7 +9175,7 @@ S: Maintained
 F: drivers/media/i2c/tda9840*

 TEA5761 TUNER DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L: linux-media@vger.kernel.org
 W: http://linuxtv.org
 T: git git://linuxtv.org/media_tree.git
@@ -9181,7 +9183,7 @@ S: Odd fixes
 F: drivers/media/tuners/tea5761.*

 TEA5767 TUNER DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L: linux-media@vger.kernel.org
 W: http://linuxtv.org
 T: git git://linuxtv.org/media_tree.git
@@ -9493,7 +9495,7 @@ F: include/linux/shmem_fs.h
 F: mm/shmem.c

 TM6000 VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L: linux-media@vger.kernel.org
 W: http://linuxtv.org
 T: git git://linuxtv.org/media_tree.git
@@ -10314,7 +10316,7 @@ S: Maintained
 F: arch/x86/kernel/cpu/mcheck/*

 XC2028/3028 TUNER DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L: linux-media@vger.kernel.org
 W: http://linuxtv.org
 T: git git://linuxtv.org/media_tree.git
Makefile | 2

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Diseased Newt

 # *DOCUMENTATION*
@@ -54,12 +54,8 @@ void s390_handle_mcck(void)
 */
 local_irq_save(flags);
 local_mcck_disable();
-/*
-* Ummm... Does this make sense at all? Copying the percpu struct
-* and then zapping it one statement later?
-*/
-memcpy(&mcck, this_cpu_ptr(&cpu_mcck), sizeof(mcck));
-memset(&mcck, 0, sizeof(struct mcck_struct));
+mcck = *this_cpu_ptr(&cpu_mcck);
+memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
 clear_cpu_flag(CIF_MCCK_PENDING);
 local_mcck_enable();
 local_irq_restore(flags);
@@ -76,7 +76,7 @@ suffix-$(CONFIG_KERNEL_XZ) := xz
 suffix-$(CONFIG_KERNEL_LZO) := lzo
 suffix-$(CONFIG_KERNEL_LZ4) := lz4

-RUN_SIZE = $(shell objdump -h vmlinux | \
+RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
 perl $(srctree)/arch/x86/tools/calc_run_size.pl)
 quiet_cmd_mkpiggy = MKPIGGY $@
 cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
@@ -465,6 +465,7 @@ static void mc_bp_resume(void)

 if (uci->valid && uci->mc)
 microcode_ops->apply_microcode(cpu);
+#ifdef CONFIG_X86_64
 else if (!uci->mc)
 /*
 * We might resume and not have applied late microcode but still
@@ -473,6 +474,7 @@ static void mc_bp_resume(void)
 * applying patches early on the APs.
 */
 load_ucode_ap();
+#endif
 }

 static struct syscore_ops mc_syscore_ops = {
@@ -216,9 +216,10 @@ static int bio_integrity_process(struct bio *bio,
 {
 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 struct blk_integrity_iter iter;
-struct bio_vec *bv;
+struct bvec_iter bviter;
+struct bio_vec bv;
 struct bio_integrity_payload *bip = bio_integrity(bio);
-unsigned int i, ret = 0;
+unsigned int ret = 0;
 void *prot_buf = page_address(bip->bip_vec->bv_page) +
 bip->bip_vec->bv_offset;

@@ -227,11 +228,11 @@ static int bio_integrity_process(struct bio *bio,
 iter.seed = bip_get_seed(bip);
 iter.prot_buf = prot_buf;

-bio_for_each_segment_all(bv, bio, i) {
-void *kaddr = kmap_atomic(bv->bv_page);
+bio_for_each_segment(bv, bio, bviter) {
+void *kaddr = kmap_atomic(bv.bv_page);

-iter.data_buf = kaddr + bv->bv_offset;
-iter.data_size = bv->bv_len;
+iter.data_buf = kaddr + bv.bv_offset;
+iter.data_size = bv.bv_len;

 ret = proc_fn(&iter);
 if (ret) {
@@ -1164,7 +1164,8 @@ static bool acpi_video_device_in_dod(struct acpi_video_device *device)
 return true;

 for (i = 0; i < video->attached_count; i++) {
-if (video->attached_array[i].bind_info == device)
+if ((video->attached_array[i].value.int_val & 0xfff) ==
+(device->device_id & 0xfff))
 return true;
 }
@@ -321,6 +321,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
 { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
 { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
+{ PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
+{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
+{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
 { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
 { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
 { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
@@ -492,6 +495,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 * enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731
 */
 { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
+{ PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi },

 /* Enmotus */
 { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
@@ -1488,7 +1488,7 @@ static int sata_fsl_probe(struct platform_device *ofdev)
 host_priv->csr_base = csr_base;

 irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-if (irq < 0) {
+if (!irq) {
 dev_err(&ofdev->dev, "invalid irq from platform\n");
 goto error_exit_with_cleanup;
 }
@@ -4565,7 +4565,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 ironlake_fdi_disable(crtc);

 ironlake_disable_pch_transcoder(dev_priv, pipe);
-intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);

 if (HAS_PCH_CPT(dev)) {
 /* disable TRANS_DP_CTL */
@@ -4636,8 +4635,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)

 if (intel_crtc->config.has_pch_encoder) {
 lpt_disable_pch_transcoder(dev_priv);
-intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
-true);
 intel_ddi_fdi_disable(crtc);
 }
@@ -899,6 +899,17 @@ void intel_lvds_init(struct drm_device *dev)
 int pipe;
 u8 pin;

+/*
+* Unlock registers and just leave them unlocked. Do this before
+* checking quirk lists to avoid bogus WARNINGs.
+*/
+if (HAS_PCH_SPLIT(dev)) {
+I915_WRITE(PCH_PP_CONTROL,
+I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+} else {
+I915_WRITE(PP_CONTROL,
+I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+}
 if (!intel_lvds_supported(dev))
 return;

@@ -1097,17 +1108,6 @@ out:
 lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
 LVDS_A3_POWER_MASK;

-/*
-* Unlock registers and just
-* leave them unlocked
-*/
-if (HAS_PCH_SPLIT(dev)) {
-I915_WRITE(PCH_PP_CONTROL,
-I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
-} else {
-I915_WRITE(PP_CONTROL,
-I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
-}
 lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
 if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
 DRM_DEBUG_KMS("lid notifier registration failed\n");
@@ -218,7 +218,6 @@ nvc0_identify(struct nouveau_device *device)
 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
-device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
 device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 break;
@@ -551,8 +551,8 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
 }

 if (status & 0x40000000) {
-nouveau_fifo_uevent(&priv->base);
 nv_wr32(priv, 0x002100, 0x40000000);
+nouveau_fifo_uevent(&priv->base);
 status &= ~0x40000000;
 }
 }
@@ -740,6 +740,8 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
 u32 inte = nv_rd32(priv, 0x002628);
 u32 unkn;

+nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
+
 for (unkn = 0; unkn < 8; unkn++) {
 u32 ints = (intr >> (unkn * 0x04)) & inte;
 if (ints & 0x1) {
@@ -751,8 +753,6 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
 nv_mask(priv, 0x002628, ints, 0);
 }
 }
-
-nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
 }

 static void
@@ -982,8 +982,8 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
 }

 if (stat & 0x80000000) {
-nve0_fifo_intr_engine(priv);
 nv_wr32(priv, 0x002100, 0x80000000);
+nve0_fifo_intr_engine(priv);
 stat &= ~0x80000000;
 }
@@ -664,7 +664,6 @@ nouveau_pmops_suspend(struct device *dev)

 pci_save_state(pdev);
 pci_disable_device(pdev);
-pci_ignore_hotplug(pdev);
 pci_set_power_state(pdev, PCI_D3hot);
 return 0;
 }
@@ -732,6 +731,7 @@ nouveau_pmops_runtime_suspend(struct device *dev)
 ret = nouveau_do_suspend(drm_dev, true);
 pci_save_state(pdev);
 pci_disable_device(pdev);
+pci_ignore_hotplug(pdev);
 pci_set_power_state(pdev, PCI_D3cold);
 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
 return ret;
@@ -52,20 +52,24 @@ nouveau_fctx(struct nouveau_fence *fence)
 return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
 }

-static void
+static int
 nouveau_fence_signal(struct nouveau_fence *fence)
 {
+int drop = 0;
+
 fence_signal_locked(&fence->base);
 list_del(&fence->head);
+rcu_assign_pointer(fence->channel, NULL);

 if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

 if (!--fctx->notify_ref)
-nvif_notify_put(&fctx->notify);
+drop = 1;
 }

 fence_put(&fence->base);
+return drop;
 }

 static struct nouveau_fence *
@@ -88,16 +92,23 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 {
 struct nouveau_fence *fence;

-nvif_notify_fini(&fctx->notify);
 spin_lock_irq(&fctx->lock);
 while (!list_empty(&fctx->pending)) {
 fence = list_entry(fctx->pending.next, typeof(*fence), head);

-nouveau_fence_signal(fence);
-fence->channel = NULL;
+if (nouveau_fence_signal(fence))
+nvif_notify_put(&fctx->notify);
 }
 spin_unlock_irq(&fctx->lock);
+
+nvif_notify_fini(&fctx->notify);
+fctx->dead = 1;
+
+/*
+* Ensure that all accesses to fence->channel complete before freeing
+* the channel.
+*/
+synchronize_rcu();
 }

 static void
@@ -112,21 +123,23 @@ nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
 kref_put(&fctx->fence_ref, nouveau_fence_context_put);
 }

-static void
+static int
 nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
 {
 struct nouveau_fence *fence;
+int drop = 0;
 u32 seq = fctx->read(chan);

 while (!list_empty(&fctx->pending)) {
 fence = list_entry(fctx->pending.next, typeof(*fence), head);

 if ((int)(seq - fence->base.seqno) < 0)
-return;
+break;

-nouveau_fence_signal(fence);
+drop |= nouveau_fence_signal(fence);
 }

+return drop;
 }

 static int
@@ -135,18 +148,21 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
 struct nouveau_fence_chan *fctx =
 container_of(notify, typeof(*fctx), notify);
 unsigned long flags;
+int ret = NVIF_NOTIFY_KEEP;

 spin_lock_irqsave(&fctx->lock, flags);
 if (!list_empty(&fctx->pending)) {
 struct nouveau_fence *fence;
+struct nouveau_channel *chan;

 fence = list_entry(fctx->pending.next, typeof(*fence), head);
-nouveau_fence_update(fence->channel, fctx);
+chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+if (nouveau_fence_update(fence->channel, fctx))
+ret = NVIF_NOTIFY_DROP;
 }
 spin_unlock_irqrestore(&fctx->lock, flags);

-/* Always return keep here. NVIF refcount is handled with nouveau_fence_update */
-return NVIF_NOTIFY_KEEP;
+return ret;
 }

 void
@@ -262,7 +278,10 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 if (!ret) {
 fence_get(&fence->base);
 spin_lock_irq(&fctx->lock);
-nouveau_fence_update(chan, fctx);
+
+if (nouveau_fence_update(chan, fctx))
+nvif_notify_put(&fctx->notify);
+
 list_add_tail(&fence->head, &fctx->pending);
 spin_unlock_irq(&fctx->lock);
 }
@@ -276,13 +295,16 @@ nouveau_fence_done(struct nouveau_fence *fence)
 if (fence->base.ops == &nouveau_fence_ops_legacy ||
 fence->base.ops == &nouveau_fence_ops_uevent) {
 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+struct nouveau_channel *chan;
 unsigned long flags;

 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
 return true;

 spin_lock_irqsave(&fctx->lock, flags);
-nouveau_fence_update(fence->channel, fctx);
+chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+if (chan && nouveau_fence_update(chan, fctx))
+nvif_notify_put(&fctx->notify);
 spin_unlock_irqrestore(&fctx->lock, flags);
 }
 return fence_is_signaled(&fence->base);
@@ -387,12 +409,18 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e

 if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
 struct nouveau_channel *prev = NULL;
+bool must_wait = true;

 f = nouveau_local_fence(fence, chan->drm);
-if (f)
-prev = f->channel;
+if (f) {
+rcu_read_lock();
+prev = rcu_dereference(f->channel);
+if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+must_wait = false;
+rcu_read_unlock();
+}

-if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+if (must_wait)
 ret = fence_wait(fence, intr);

 return ret;
@@ -403,19 +431,22 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e

 for (i = 0; i < fobj->shared_count && !ret; ++i) {
 struct nouveau_channel *prev = NULL;
+bool must_wait = true;

 fence = rcu_dereference_protected(fobj->shared[i],
 reservation_object_held(resv));

 f = nouveau_local_fence(fence, chan->drm);
-if (f)
-prev = f->channel;
+if (f) {
+rcu_read_lock();
+prev = rcu_dereference(f->channel);
+if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+must_wait = false;
+rcu_read_unlock();
+}

-if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+if (must_wait)
 ret = fence_wait(fence, intr);
-
-if (ret)
-break;
 }

 return ret;
@@ -463,7 +494,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f)
 struct nouveau_fence *fence = from_fence(f);
 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

-return fence->channel ? fctx->name : "dead channel";
+return !fctx->dead ? fctx->name : "dead channel";
 }

 /*
@@ -476,9 +507,16 @@ static bool nouveau_fence_is_signaled(struct fence *f)
 {
 struct nouveau_fence *fence = from_fence(f);
 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
-struct nouveau_channel *chan = fence->channel;
+struct nouveau_channel *chan;
+bool ret = false;

-return (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+rcu_read_lock();
+chan = rcu_dereference(fence->channel);
+if (chan)
+ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+rcu_read_unlock();
+
+return ret;
 }

 static bool nouveau_fence_no_signaling(struct fence *f)
@@ -14,7 +14,7 @@ struct nouveau_fence {

 bool sysmem;

-struct nouveau_channel *channel;
+struct nouveau_channel __rcu *channel;
 unsigned long timeout;
 };

@@ -47,7 +47,7 @@ struct nouveau_fence_chan {
 char name[32];

 struct nvif_notify notify;
-int notify_ref;
+int notify_ref, dead;
 };

 struct nouveau_fence_priv {
@@ -241,7 +241,6 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 resv = reloc->robj->tbo.resv;
 r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
 reloc->tv.shared);
-
 if (r)
 return r;
 }
@@ -800,6 +800,8 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,

 /* Get associated drm_crtc: */
 drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
+if (!drmcrtc)
+return -EINVAL;

 /* Helper routine in DRM core does all the work: */
 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
@@ -233,6 +233,13 @@ int radeon_bo_create(struct radeon_device *rdev,
 if (!(rdev->flags & RADEON_IS_PCIE))
 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

+#ifdef CONFIG_X86_32
+/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
+* See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+*/
+bo->flags &= ~RADEON_GEM_GTT_WC;
+#endif
+
 radeon_ttm_placement_from_domain(bo, domain);
 /* Kernel allocation are uninterruptible */
 down_read(&rdev->pm.mclk_lock);
@@ -111,6 +111,8 @@
 #define CDNS_I2C_DIVA_MAX 4
 #define CDNS_I2C_DIVB_MAX 64

+#define CDNS_I2C_TIMEOUT_MAX 0xFF
+
 #define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset)
 #define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset)

@@ -852,6 +854,15 @@ static int cdns_i2c_probe(struct platform_device *pdev)
 goto err_clk_dis;
 }

+/*
+* Cadence I2C controller has a bug wherein it generates
+* invalid read transaction after HW timeout in master receiver mode.
+* HW timeout is not used by this driver and the interrupt is disabled.
+* But the feature itself cannot be disabled. Hence maximum value
+* is written to this register to reduce the chances of error.
+*/
+cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+
 dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
 id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
@@ -407,11 +407,9 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
 if (dev->cmd_err & DAVINCI_I2C_STR_NACK) {
 if (msg->flags & I2C_M_IGNORE_NAK)
 return msg->len;
-if (stop) {
-w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
-w |= DAVINCI_I2C_MDR_STP;
-davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
-}
+w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
+w |= DAVINCI_I2C_MDR_STP;
+davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
 return -EREMOTEIO;
 }
 return -EIO;
@@ -359,7 +359,7 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
 }

 /* Configure Tx/Rx FIFO threshold levels */
-dw_writel(dev, dev->tx_fifo_depth - 1, DW_IC_TX_TL);
+dw_writel(dev, dev->tx_fifo_depth / 2, DW_IC_TX_TL);
 dw_writel(dev, 0, DW_IC_RX_TL);

 /* configure the i2c master */
@@ -922,14 +922,12 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
 if (stat & OMAP_I2C_STAT_NACK) {
 err |= OMAP_I2C_STAT_NACK;
 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
-break;
 }

 if (stat & OMAP_I2C_STAT_AL) {
 dev_err(dev->dev, "Arbitration lost\n");
 err |= OMAP_I2C_STAT_AL;
 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
-break;
 }

 /*
@@ -954,11 +952,13 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
 if (dev->fifo_size)
 num_bytes = dev->buf_len;

-omap_i2c_receive_data(dev, num_bytes, true);
-
-if (dev->errata & I2C_OMAP_ERRATA_I207)
+if (dev->errata & I2C_OMAP_ERRATA_I207) {
 i2c_omap_errata_i207(dev, stat);
+num_bytes = (omap_i2c_read_reg(dev,
+OMAP_I2C_BUFSTAT_REG) >> 8) & 0x3F;
+}

+omap_i2c_receive_data(dev, num_bytes, true);
 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
 continue;
 }
@@ -421,7 +421,7 @@ static int evdev_open(struct inode *inode, struct file *file)

 err_free_client:
 evdev_detach_client(evdev, client);
-kfree(client);
+kvfree(client);
 return error;
 }
@@ -2190,7 +2190,7 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev,
 ret = smiapp_set_compose(subdev, fh, sel);
 break;
 default:
-BUG();
+ret = -EINVAL;
 }

 mutex_unlock(&sensor->mutex);
@@ -1078,7 +1078,7 @@ static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
 for (line = 0; line < lines; line++) {
 while (offset && offset >= sg_dma_len(sg)) {
 offset -= sg_dma_len(sg);
-sg++;
+sg = sg_next(sg);
 }

 if (lpi && line > 0 && !(line % lpi))
@@ -1101,14 +1101,14 @@ static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
 todo -= (sg_dma_len(sg)-offset);
 offset = 0;
-sg++;
+sg = sg_next(sg);
 while (todo > sg_dma_len(sg)) {
 *(rp++) = cpu_to_le32(RISC_WRITE|
 sg_dma_len(sg));
 *(rp++) = cpu_to_le32(sg_dma_address(sg));
 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
 todo -= sg_dma_len(sg);
-sg++;
+sg = sg_next(sg);
 }
 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
 *(rp++) = cpu_to_le32(sg_dma_address(sg));
@@ -105,11 +105,8 @@ static irqreturn_t solo_isr(int irq, void *data)
 if (!status)
 return IRQ_NONE;

-if (status & ~solo_dev->irq_mask) {
-solo_reg_write(solo_dev, SOLO_IRQ_STAT,
-status & ~solo_dev->irq_mask);
-status &= solo_dev->irq_mask;
-}
+/* Acknowledge all interrupts immediately */
+solo_reg_write(solo_dev, SOLO_IRQ_STAT, status);

 if (status & SOLO_IRQ_PCI_ERR)
 solo_p2m_error_isr(solo_dev);
@@ -132,9 +129,6 @@ static irqreturn_t solo_isr(int irq, void *data)
 if (status & SOLO_IRQ_G723)
 solo_g723_isr(solo_dev);

-/* Clear all interrupts handled */
-solo_reg_write(solo_dev, SOLO_IRQ_STAT, status);
-
 return IRQ_HANDLED;
 }
@@ -259,8 +259,8 @@ again:
 case 32:
 if ((scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) {
 protocol = RC_TYPE_RC6_MCE;
-scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
 toggle = !!(scancode & RC6_6A_MCE_TOGGLE_MASK);
+scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
 } else {
 protocol = RC_BIT_RC6_6A_32;
 toggle = 0;
@@ -632,7 +632,7 @@ static void s2255_fillbuff(struct s2255_vc *vc,
 break;
 case V4L2_PIX_FMT_JPEG:
 case V4L2_PIX_FMT_MJPEG:
-buf->vb.v4l2_buf.length = jpgsize;
+vb2_set_plane_payload(&buf->vb, 0, jpgsize);
 memcpy(vbuf, tmpbuf, jpgsize);
 break;
 case V4L2_PIX_FMT_YUV422P:
@@ -225,7 +225,12 @@ static int bond_changelink(struct net_device *bond_dev,

 bond_option_arp_ip_targets_clear(bond);
 nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
-__be32 target = nla_get_be32(attr);
+__be32 target;
+
+if (nla_len(attr) < sizeof(target))
+return -EINVAL;
+
+target = nla_get_be32(attr);

 bond_opt_initval(&newval, (__force u64)target);
 err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
@@ -2442,9 +2442,13 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
 SUPPORTED_10000baseKX4_Full;
 else if (type == FW_PORT_TYPE_FIBER_XFI ||
-type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
+type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) {
 v |= SUPPORTED_FIBRE;
-else if (type == FW_PORT_TYPE_BP40_BA)
+if (caps & FW_PORT_CAP_SPEED_1G)
+v |= SUPPORTED_1000baseT_Full;
+if (caps & FW_PORT_CAP_SPEED_10G)
+v |= SUPPORTED_10000baseT_Full;
+} else if (type == FW_PORT_TYPE_BP40_BA)
 v |= SUPPORTED_40000baseSR4_Full;

 if (caps & FW_PORT_CAP_ANEG)
@@ -917,21 +917,13 @@ static int sh_eth_reset(struct net_device *ndev)
 return ret;
 }

-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 static void sh_eth_set_receive_align(struct sk_buff *skb)
 {
-int reserve;
+uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);

-reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
 if (reserve)
-skb_reserve(skb, reserve);
+skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
 }
-#else
-static void sh_eth_set_receive_align(struct sk_buff *skb)
-{
-skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
-}
-#endif


 /* CPU <-> EDMAC endian convert */
@@ -1119,6 +1111,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 struct sh_eth_txdesc *txdesc = NULL;
 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
+int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;

 mdp->cur_rx = 0;
 mdp->cur_tx = 0;
@@ -1131,21 +1124,21 @@ static void sh_eth_ring_format(struct net_device *ndev)
 for (i = 0; i < mdp->num_rx_ring; i++) {
 /* skb */
 mdp->rx_skbuff[i] = NULL;
-skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
+skb = netdev_alloc_skb(ndev, skbuff_size);
 mdp->rx_skbuff[i] = skb;
 if (skb == NULL)
 break;
-dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
-DMA_FROM_DEVICE);
 sh_eth_set_receive_align(skb);

 /* RX descriptor */
 rxdesc = &mdp->rx_ring[i];
+/* The size of the buffer is a multiple of 16 bytes. */
+rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
+DMA_FROM_DEVICE);
 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

-/* The size of the buffer is 16 byte boundary. */
-rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
 /* Rx descriptor address set */
 if (i == 0) {
 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
@@ -1397,6 +1390,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 struct sk_buff *skb;
 u16 pkt_len = 0;
 u32 desc_status;
+int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;

 rxdesc = &mdp->rx_ring[entry];
 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
@@ -1448,7 +1442,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 if (mdp->cd->rpadir)
 skb_reserve(skb, NET_IP_ALIGN);
 dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
-mdp->rx_buf_sz,
+ALIGN(mdp->rx_buf_sz, 16),
 DMA_FROM_DEVICE);
 skb_put(skb, pkt_len);
 skb->protocol = eth_type_trans(skb, ndev);
@@ -1468,13 +1462,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

 if (mdp->rx_skbuff[entry] == NULL) {
-skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
+skb = netdev_alloc_skb(ndev, skbuff_size);
 mdp->rx_skbuff[entry] = skb;
 if (skb == NULL)
 break; /* Better luck next round. */
-dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
-DMA_FROM_DEVICE);
 sh_eth_set_receive_align(skb);
+dma_map_single(&ndev->dev, skb->data,
+rxdesc->buffer_length, DMA_FROM_DEVICE);

 skb_checksum_none_assert(skb);
 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
@@ -2042,6 +2036,8 @@ static int sh_eth_open(struct net_device *ndev)
 if (ret)
 goto out_free_irq;

+mdp->is_opened = 1;
+
 return ret;

 out_free_irq:
@@ -2131,6 +2127,36 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 return NETDEV_TX_OK;
 }

+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
+{
+struct sh_eth_private *mdp = netdev_priv(ndev);
+
+if (sh_eth_is_rz_fast_ether(mdp))
+return &ndev->stats;
+
+if (!mdp->is_opened)
+return &ndev->stats;
+
+ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
+sh_eth_write(ndev, 0, TROCR); /* (write clear) */
+ndev->stats.collisions += sh_eth_read(ndev, CDCR);
+sh_eth_write(ndev, 0, CDCR); /* (write clear) */
+ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
+sh_eth_write(ndev, 0, LCCR); /* (write clear) */
+
+if (sh_eth_is_gether(mdp)) {
+ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
+sh_eth_write(ndev, 0, CERCR); /* (write clear) */
+ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
+sh_eth_write(ndev, 0, CEECR); /* (write clear) */
+} else {
+ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
+sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
+}
+
+return &ndev->stats;
+}
+
 /* device close function */
 static int sh_eth_close(struct net_device *ndev)
 {
@@ -2145,6 +2171,7 @@ static int sh_eth_close(struct net_device *ndev)
 sh_eth_write(ndev, 0, EDTRR);
 sh_eth_write(ndev, 0, EDRRR);

+sh_eth_get_stats(ndev);
 /* PHY Disconnect */
 if (mdp->phydev) {
 phy_stop(mdp->phydev);
@@ -2163,38 +2190,11 @@ static int sh_eth_close(struct net_device *ndev)

 pm_runtime_put_sync(&mdp->pdev->dev);

+mdp->is_opened = 0;
+
 return 0;
 }

-static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
-{
-struct sh_eth_private *mdp = netdev_priv(ndev);
-
-if (sh_eth_is_rz_fast_ether(mdp))
-return &ndev->stats;
-
-pm_runtime_get_sync(&mdp->pdev->dev);
-
-ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
-sh_eth_write(ndev, 0, TROCR); /* (write clear) */
-ndev->stats.collisions += sh_eth_read(ndev, CDCR);
-sh_eth_write(ndev, 0, CDCR); /* (write clear) */
-ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
-sh_eth_write(ndev, 0, LCCR); /* (write clear) */
-if (sh_eth_is_gether(mdp)) {
-ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
-sh_eth_write(ndev, 0, CERCR); /* (write clear) */
-ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
-sh_eth_write(ndev, 0, CEECR); /* (write clear) */
-} else {
-ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
-sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
-}
-pm_runtime_put_sync(&mdp->pdev->dev);
-
-return &ndev->stats;
-}
-
 /* ioctl to device function */
 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 {
@@ -162,9 +162,9 @@ enum {

 /* Driver's parameters */
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
-#define SH4_SKB_RX_ALIGN 32
+#define SH_ETH_RX_ALIGN 32
 #else
-#define SH2_SH3_SKB_RX_ALIGN 2
+#define SH_ETH_RX_ALIGN 2
 #endif

 /* Register's bits
@ -522,6 +522,7 @@ struct sh_eth_private {
|
||||||
|
|
||||||
unsigned no_ether_link:1;
|
unsigned no_ether_link:1;
|
||||||
unsigned ether_link_active_low:1;
|
unsigned ether_link_active_low:1;
|
||||||
|
unsigned is_opened:1;
|
||||||
};
|
};
|
||||||
|
|
||||||
static inline void sh_eth_soft_swap(char *src, int len)
|
static inline void sh_eth_soft_swap(char *src, int len)
|
||||||
|
|
|
@@ -265,6 +265,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
 
 	plat_dat = dev_get_platdata(&pdev->dev);
 
+	if (!plat_dat)
+		plat_dat = devm_kzalloc(&pdev->dev,
+					sizeof(struct plat_stmmacenet_data),
+					GFP_KERNEL);
+	if (!plat_dat) {
+		pr_err("%s: ERROR: no memory", __func__);
+		return -ENOMEM;
+	}
+
 	/* Set default value for multicast hash bins */
 	plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
 
@@ -272,15 +281,6 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
 	plat_dat->unicast_filter_entries = 1;
 
 	if (pdev->dev.of_node) {
-		if (!plat_dat)
-			plat_dat = devm_kzalloc(&pdev->dev,
-						sizeof(struct plat_stmmacenet_data),
-						GFP_KERNEL);
-		if (!plat_dat) {
-			pr_err("%s: ERROR: no memory", __func__);
-			return -ENOMEM;
-		}
-
 		ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
 		if (ret) {
 			pr_err("%s: main dt probe failed", __func__);
@@ -496,9 +496,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
 		len = skb_frag_size(frag);
 		offset = frag->page_offset;
 
-		/* Data must not cross a page boundary. */
-		BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
-
 		/* Skip unused frames from start of page */
 		page += offset >> PAGE_SHIFT;
 		offset &= ~PAGE_MASK;
@@ -506,8 +503,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
 		while (len > 0) {
 			unsigned long bytes;
 
-			BUG_ON(offset >= PAGE_SIZE);
-
 			bytes = PAGE_SIZE - offset;
 			if (bytes > len)
 				bytes = len;
@@ -964,8 +964,6 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
 int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
 					phys_addr_t size, bool nomap)
 {
-	if (memblock_is_region_reserved(base, size))
-		return -EBUSY;
 	if (nomap)
 		return memblock_remove(base, size);
 	return memblock_reserve(base, size);
@@ -276,6 +276,7 @@ struct tegra_pcie {
 
 	struct resource all;
 	struct resource io;
+	struct resource pio;
 	struct resource mem;
 	struct resource prefetch;
 	struct resource busn;
@@ -658,7 +659,6 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
 {
 	struct tegra_pcie *pcie = sys_to_pcie(sys);
 	int err;
-	phys_addr_t io_start;
 
 	err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
 	if (err < 0)
@@ -668,14 +668,12 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
 	if (err)
 		return err;
 
-	io_start = pci_pio_to_address(pcie->io.start);
-
 	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
 	pci_add_resource_offset(&sys->resources, &pcie->prefetch,
 				sys->mem_offset);
 	pci_add_resource(&sys->resources, &pcie->busn);
 
-	pci_ioremap_io(nr * SZ_64K, io_start);
+	pci_ioremap_io(pcie->pio.start, pcie->io.start);
 
 	return 1;
 }
@@ -786,7 +784,6 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
 static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
 {
 	u32 fpci_bar, size, axi_address;
-	phys_addr_t io_start = pci_pio_to_address(pcie->io.start);
 
 	/* Bar 0: type 1 extended configuration space */
 	fpci_bar = 0xfe100000;
@@ -799,7 +796,7 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
 	/* Bar 1: downstream IO bar */
 	fpci_bar = 0xfdfc0000;
 	size = resource_size(&pcie->io);
-	axi_address = io_start;
+	axi_address = pcie->io.start;
 	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
 	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
 	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
@@ -1690,8 +1687,23 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
 
 		switch (res.flags & IORESOURCE_TYPE_BITS) {
 		case IORESOURCE_IO:
-			memcpy(&pcie->io, &res, sizeof(res));
-			pcie->io.name = np->full_name;
+			memcpy(&pcie->pio, &res, sizeof(res));
+			pcie->pio.name = np->full_name;
+
+			/*
+			 * The Tegra PCIe host bridge uses this to program the
+			 * mapping of the I/O space to the physical address,
+			 * so we override the .start and .end fields here that
+			 * of_pci_range_to_resource() converted to I/O space.
+			 * We also set the IORESOURCE_MEM type to clarify that
+			 * the resource is in the physical memory space.
+			 */
+			pcie->io.start = range.cpu_addr;
+			pcie->io.end = range.cpu_addr + range.size - 1;
+			pcie->io.flags = IORESOURCE_MEM;
+			pcie->io.name = "I/O";
+
+			memcpy(&res, &pcie->io, sizeof(res));
 			break;
 
 		case IORESOURCE_MEM:
@@ -161,7 +161,7 @@ static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
 static const struct s3c2410_wdt_variant drv_data_exynos7 = {
 	.disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET,
 	.mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET,
-	.mask_bit = 0,
+	.mask_bit = 23,
 	.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
 	.rst_stat_bit = 23,	/* A57 WDTRESET */
 	.quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
@@ -736,7 +736,12 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
 	}
 
 	alias = d_find_alias(inode);
-	if (alias && !vfat_d_anon_disconn(alias)) {
+	/*
+	 * Checking "alias->d_parent == dentry->d_parent" to make sure
+	 * FS is not corrupted (especially double linked dir).
+	 */
+	if (alias && alias->d_parent == dentry->d_parent &&
+	    !vfat_d_anon_disconn(alias)) {
 		/*
 		 * This inode has non anonymous-DCACHE_DISCONNECTED
 		 * dentry. This means, the user did ->lookup() by an
@@ -755,12 +760,9 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
 
 out:
 	mutex_unlock(&MSDOS_SB(sb)->s_lock);
-	dentry->d_time = dentry->d_parent->d_inode->i_version;
-	dentry = d_splice_alias(inode, dentry);
-	if (dentry)
-		dentry->d_time = dentry->d_parent->d_inode->i_version;
-	return dentry;
-
+	if (!inode)
+		dentry->d_time = dir->i_version;
+	return d_splice_alias(inode, dentry);
 error:
 	mutex_unlock(&MSDOS_SB(sb)->s_lock);
 	return ERR_PTR(err);
@@ -793,7 +795,6 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 	inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
 	/* timestamp is already written, so mark_inode_dirty() is unneeded. */
 
-	dentry->d_time = dentry->d_parent->d_inode->i_version;
 	d_instantiate(dentry, inode);
 out:
 	mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -824,6 +825,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
 	clear_nlink(inode);
 	inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
 	fat_detach(inode);
+	dentry->d_time = dir->i_version;
 out:
 	mutex_unlock(&MSDOS_SB(sb)->s_lock);
 
@@ -849,6 +851,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
 	clear_nlink(inode);
 	inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
 	fat_detach(inode);
+	dentry->d_time = dir->i_version;
 out:
 	mutex_unlock(&MSDOS_SB(sb)->s_lock);
 
@@ -889,7 +892,6 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 	inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
 	/* timestamp is already written, so mark_inode_dirty() is unneeded. */
 
-	dentry->d_time = dentry->d_parent->d_inode->i_version;
 	d_instantiate(dentry, inode);
 
 	mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -1853,13 +1853,12 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
 				journal->j_chksum_driver = NULL;
 				return 0;
 			}
-		}
 
 		/* Precompute checksum seed for all metadata */
-		if (jbd2_journal_has_csum_v2or3(journal))
 			journal->j_csum_seed = jbd2_chksum(journal, ~0,
 							   sb->s_uuid,
 							   sizeof(sb->s_uuid));
+		}
 	}
 
 	/* If enabling v1 checksums, downgrade superblock */
@@ -427,7 +427,7 @@ header-y += virtio_net.h
 header-y += virtio_pci.h
 header-y += virtio_ring.h
 header-y += virtio_rng.h
-header=y += vm_sockets.h
+header-y += vm_sockets.h
 header-y += vt.h
 header-y += wait.h
 header-y += wanrouter.h
ipc/sem.c
@@ -507,13 +507,6 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 		return retval;
 	}
 
-	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
-	if (id < 0) {
-		ipc_rcu_putref(sma, sem_rcu_free);
-		return id;
-	}
-	ns->used_sems += nsems;
-
 	sma->sem_base = (struct sem *) &sma[1];
 
 	for (i = 0; i < nsems; i++) {
@@ -528,6 +521,14 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 	INIT_LIST_HEAD(&sma->list_id);
 	sma->sem_nsems = nsems;
 	sma->sem_ctime = get_seconds();
 
+	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
+	if (id < 0) {
+		ipc_rcu_putref(sma, sem_rcu_free);
+		return id;
+	}
+	ns->used_sems += nsems;
+
 	sem_unlock(sma, -1);
 	rcu_read_unlock();
 
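The ipc/sem.c hunks above move ipc_addid(), which is what makes the new sem_array findable by other tasks, to after every field of the array has been initialized. The general pattern is "initialize fully, then publish". A minimal standalone sketch with C11 atomics (hypothetical struct and names, not the ipc code itself) illustrates the ordering:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for an object that other threads can look up once published. */
struct sem_like {
	int nsems;
	long ctime;
};

/* Global "registry" slot; readers only ever see NULL or a fully set-up object. */
static _Atomic(struct sem_like *) registry;

static void publish(struct sem_like *s)
{
	/* Everything written to *s before this becomes visible no later than the pointer. */
	atomic_store_explicit(&registry, s, memory_order_release);
}

int main(void)
{
	struct sem_like *s = malloc(sizeof(*s));
	if (!s)
		return 1;

	/* 1. Fully initialize... */
	s->nsems = 4;
	s->ctime = 1234567890;

	/* 2. ...and only then make it reachable (the patch moves ipc_addid() to this point). */
	publish(s);

	struct sem_like *seen = atomic_load_explicit(&registry, memory_order_acquire);
	printf("published object with %d sems\n", seen->nsems);
	free(s);
	return 0;
}

Publishing before step 1 completes is exactly the window the upstream fix closes: another task could find the semaphore set while its fields were still being filled in.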
@@ -2874,10 +2874,14 @@ asmlinkage __visible void __sched schedule_user(void)
 	 * or we have been woken up remotely but the IPI has not yet arrived,
 	 * we haven't yet exited the RCU idle mode. Do it here manually until
 	 * we find a better solution.
+	 *
+	 * NB: There are buggy callers of this function.  Ideally we
+	 * should warn if prev_state != IN_USER, but that will trigger
+	 * too frequently to make sense yet.
 	 */
-	user_exit();
+	enum ctx_state prev_state = exception_enter();
 	schedule();
-	user_enter();
+	exception_exit(prev_state);
 }
 #endif
 
@@ -598,6 +598,7 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
 
 	return pool;
 }
+EXPORT_SYMBOL(devm_gen_pool_create);
 
 /**
  * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
@@ -28,7 +28,7 @@ void show_mem(unsigned int filter)
 			continue;
 
 		total += zone->present_pages;
-		reserved = zone->present_pages - zone->managed_pages;
+		reserved += zone->present_pages - zone->managed_pages;
 
 		if (is_highmem_idx(zoneid))
 			highmem += zone->present_pages;
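The one-character fix above is easy to gloss over: with "=" the reserved counter is overwritten on every loop iteration, so show_mem() ends up reporting only the last zone's reserved pages. A self-contained sketch of the same pattern (plain userspace C with made-up zone values, not the kernel code) shows why only "+=" accumulates across zones:

#include <stdio.h>

/* Hypothetical per-zone page counts, standing in for zone->present_pages
 * and zone->managed_pages; the numbers are invented for illustration. */
struct zone { unsigned long present_pages, managed_pages; };

int main(void)
{
	struct zone zones[] = { { 4096, 4000 }, { 8192, 8100 }, { 2048, 2000 } };
	unsigned long reserved_bug = 0, reserved_fix = 0;

	for (int i = 0; i < 3; i++) {
		reserved_bug = zones[i].present_pages - zones[i].managed_pages;	/* overwrites */
		reserved_fix += zones[i].present_pages - zones[i].managed_pages;	/* accumulates */
	}

	/* Prints "bug: 48  fix: 236": "=" keeps only the last zone's reserved count. */
	printf("bug: %lu  fix: %lu\n", reserved_bug, reserved_fix);
	return 0;
}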
@@ -244,8 +244,10 @@ int __frontswap_store(struct page *page)
 			  the (older) page from frontswap
 		 */
 		inc_frontswap_failed_stores();
-		if (dup)
+		if (dup) {
 			__frontswap_clear(sis, offset);
+			frontswap_ops->invalidate_page(type, offset);
+		}
 	}
 	if (frontswap_writethrough_enabled)
 		/* report failure so swap also writes to swap device */
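The frontswap hunk is subtle: when a duplicate store fails, the backend may still hold the old version of the page, so clearing only the kernel-side bookkeeping is not enough; the stale backend copy must be invalidated or a later load could return expired data. A toy single-slot cache below (plain C, invented names, not the real frontswap API) sketches the hazard:

#include <stdio.h>
#include <string.h>

/* Toy "backend" holding at most one value, for brevity. */
static int backend_valid;
static char backend_data[16];

static int backend_store(const char *data, int fail)
{
	if (fail)
		return -1;		/* new data was NOT written */
	strncpy(backend_data, data, sizeof(backend_data) - 1);
	backend_valid = 1;
	return 0;
}

static void backend_invalidate(void)
{
	backend_valid = 0;
}

int main(void)
{
	backend_store("version-1", 0);			/* first store succeeds */

	if (backend_store("version-2", 1) < 0) {	/* duplicate store fails */
		/* Without this invalidate, the backend would still answer with
		 * "version-1" even though the caller now owns "version-2". */
		backend_invalidate();
	}

	printf("backend has %s\n", backend_valid ? backend_data : "nothing (correct)");
	return 0;
}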
mm/memory.c
@@ -816,20 +816,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		if (!pte_file(pte)) {
 			swp_entry_t entry = pte_to_swp_entry(pte);
 
-			if (swap_duplicate(entry) < 0)
-				return entry.val;
-
-			/* make sure dst_mm is on swapoff's mmlist. */
-			if (unlikely(list_empty(&dst_mm->mmlist))) {
-				spin_lock(&mmlist_lock);
-				if (list_empty(&dst_mm->mmlist))
-					list_add(&dst_mm->mmlist,
-						 &src_mm->mmlist);
-				spin_unlock(&mmlist_lock);
-			}
-			if (likely(!non_swap_entry(entry)))
-				rss[MM_SWAPENTS]++;
-			else if (is_migration_entry(entry)) {
-				page = migration_entry_to_page(entry);
+			if (likely(!non_swap_entry(entry))) {
+				if (swap_duplicate(entry) < 0)
+					return entry.val;
+
+				/* make sure dst_mm is on swapoff's mmlist. */
+				if (unlikely(list_empty(&dst_mm->mmlist))) {
+					spin_lock(&mmlist_lock);
+					if (list_empty(&dst_mm->mmlist))
+						list_add(&dst_mm->mmlist,
+							 &src_mm->mmlist);
+					spin_unlock(&mmlist_lock);
+				}
+				rss[MM_SWAPENTS]++;
+			} else if (is_migration_entry(entry)) {
+				page = migration_entry_to_page(entry);
 
 				if (PageAnon(page))
mm/mmap.c
@@ -776,8 +776,11 @@ again:			remove_next = 1 + (end > next->vm_end);
 			 * shrinking vma had, to cover any anon pages imported.
 			 */
 			if (exporter && exporter->anon_vma && !importer->anon_vma) {
-				if (anon_vma_clone(importer, exporter))
-					return -ENOMEM;
+				int error;
+
+				error = anon_vma_clone(importer, exporter);
+				if (error)
+					return error;
 				importer->anon_vma = exporter->anon_vma;
 			}
 		}
@@ -2469,7 +2472,8 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (err)
 		goto out_free_vma;
 
-	if (anon_vma_clone(new, vma))
+	err = anon_vma_clone(new, vma);
+	if (err)
 		goto out_free_mpol;
 
 	if (new->vm_file)
@@ -274,6 +274,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 {
 	struct anon_vma_chain *avc;
 	struct anon_vma *anon_vma;
+	int error;
 
 	/* Don't bother if the parent process has no anon_vma here. */
 	if (!pvma->anon_vma)
@@ -283,8 +284,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 	 * First, attach the new VMA to the parent VMA's anon_vmas,
 	 * so rmap can find non-COWed pages in child processes.
 	 */
-	if (anon_vma_clone(vma, pvma))
-		return -ENOMEM;
+	error = anon_vma_clone(vma, pvma);
+	if (error)
+		return error;
 
 	/* Then add our own anon_vma. */
 	anon_vma = anon_vma_alloc();
@@ -3076,7 +3076,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 	void *obj;
 	int x;
 
-	VM_BUG_ON(nodeid > num_online_nodes());
+	VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
 	n = get_node(cachep, nodeid);
 	BUG_ON(!n);
 
@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work)
 	unsigned long scanned;
 	unsigned long reclaimed;
 
+	spin_lock(&vmpr->sr_lock);
 	/*
 	 * Several contexts might be calling vmpressure(), so it is
 	 * possible that the work was rescheduled again before the old
@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work)
 	 * here. No need for any locks here since we don't care if
 	 * vmpr->reclaimed is in sync.
 	 */
-	if (!vmpr->scanned)
-		return;
-
-	spin_lock(&vmpr->sr_lock);
 	scanned = vmpr->scanned;
+	if (!scanned) {
+		spin_unlock(&vmpr->sr_lock);
+		return;
+	}
+
 	reclaimed = vmpr->reclaimed;
 	vmpr->scanned = 0;
 	vmpr->reclaimed = 0;
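The vmpressure fix takes sr_lock before reading and testing vmpr->scanned, instead of testing first and locking afterwards, which closed a window where another context could change the counters between the check and the reset. A minimal pthread sketch of the same check-under-lock idea (invented counter names, not the vmpressure code) shows the fixed consumer pattern:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sr_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long scanned;	/* stands in for vmpr->scanned */

/* Producer side: bumps the counter under the lock, like vmpressure() does. */
static void *producer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&sr_lock);
		scanned++;
		pthread_mutex_unlock(&sr_lock);
	}
	return NULL;
}

/* Consumer side: take the lock first, then test and consume the snapshot,
 * so the test and the reset are guaranteed to see the same value. */
static void consume(void)
{
	pthread_mutex_lock(&sr_lock);
	unsigned long snap = scanned;
	if (!snap) {
		pthread_mutex_unlock(&sr_lock);
		return;
	}
	scanned = 0;
	pthread_mutex_unlock(&sr_lock);
	printf("consumed %lu scanned pages\n", snap);
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, producer, NULL);
	pthread_join(t, NULL);
	consume();
	return 0;
}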
@@ -1498,6 +1498,7 @@ static int do_setlink(const struct sk_buff *skb,
 			goto errout;
 		}
 		if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+			put_net(net);
 			err = -EPERM;
 			goto errout;
 		}
@@ -117,6 +117,7 @@ struct keyring_search_context {
 #define KEYRING_SEARCH_NO_UPDATE_TIME	0x0004	/* Don't update times */
 #define KEYRING_SEARCH_NO_CHECK_PERM	0x0008	/* Don't check permissions */
 #define KEYRING_SEARCH_DETECT_TOO_DEEP	0x0010	/* Give an error on excessive depth */
+#define KEYRING_SEARCH_SKIP_EXPIRED	0x0020	/* Ignore expired keys (intention to replace) */
 
 	int (*iterator)(const void *object, void *iterator_data);
 
@@ -26,6 +26,8 @@
 #include <asm/uaccess.h>
 #include "internal.h"
 
+#define KEY_MAX_DESC_SIZE 4096
+
 static int key_get_type_from_user(char *type,
 				   const char __user *_type,
 				   unsigned len)
@@ -78,7 +80,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
 
 	description = NULL;
 	if (_description) {
-		description = strndup_user(_description, PAGE_SIZE);
+		description = strndup_user(_description, KEY_MAX_DESC_SIZE);
 		if (IS_ERR(description)) {
 			ret = PTR_ERR(description);
 			goto error;
@@ -177,7 +179,7 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type,
 		goto error;
 
 	/* pull the description into kernel space */
-	description = strndup_user(_description, PAGE_SIZE);
+	description = strndup_user(_description, KEY_MAX_DESC_SIZE);
 	if (IS_ERR(description)) {
 		ret = PTR_ERR(description);
 		goto error;
@@ -287,7 +289,7 @@ long keyctl_join_session_keyring(const char __user *_name)
 	/* fetch the name from userspace */
 	name = NULL;
 	if (_name) {
-		name = strndup_user(_name, PAGE_SIZE);
+		name = strndup_user(_name, KEY_MAX_DESC_SIZE);
 		if (IS_ERR(name)) {
 			ret = PTR_ERR(name);
 			goto error;
@@ -562,8 +564,9 @@ long keyctl_describe_key(key_serial_t keyid,
 {
 	struct key *key, *instkey;
 	key_ref_t key_ref;
-	char *tmpbuf;
+	char *infobuf;
 	long ret;
+	int desclen, infolen;
 
 	key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
 	if (IS_ERR(key_ref)) {
@@ -586,38 +589,31 @@ long keyctl_describe_key(key_serial_t keyid,
 	}
 
 okay:
-	/* calculate how much description we're going to return */
-	ret = -ENOMEM;
-	tmpbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!tmpbuf)
-		goto error2;
-
 	key = key_ref_to_ptr(key_ref);
+	desclen = strlen(key->description);
 
-	ret = snprintf(tmpbuf, PAGE_SIZE - 1,
-		       "%s;%d;%d;%08x;%s",
-		       key->type->name,
-		       from_kuid_munged(current_user_ns(), key->uid),
-		       from_kgid_munged(current_user_ns(), key->gid),
-		       key->perm,
-		       key->description ?: "");
-
-	/* include a NUL char at the end of the data */
-	if (ret > PAGE_SIZE - 1)
-		ret = PAGE_SIZE - 1;
-	tmpbuf[ret] = 0;
-	ret++;
+	/* calculate how much information we're going to return */
+	ret = -ENOMEM;
+	infobuf = kasprintf(GFP_KERNEL,
+			    "%s;%d;%d;%08x;",
+			    key->type->name,
+			    from_kuid_munged(current_user_ns(), key->uid),
+			    from_kgid_munged(current_user_ns(), key->gid),
+			    key->perm);
+	if (!infobuf)
+		goto error2;
+	infolen = strlen(infobuf);
+	ret = infolen + desclen + 1;
 
 	/* consider returning the data */
-	if (buffer && buflen > 0) {
-		if (buflen > ret)
-			buflen = ret;
-
-		if (copy_to_user(buffer, tmpbuf, buflen) != 0)
+	if (buffer && buflen >= ret) {
+		if (copy_to_user(buffer, infobuf, infolen) != 0 ||
+		    copy_to_user(buffer + infolen, key->description,
+				 desclen + 1) != 0)
 			ret = -EFAULT;
 	}
 
-	kfree(tmpbuf);
+	kfree(infobuf);
 error2:
 	key_ref_put(key_ref);
 error:
@@ -649,7 +645,7 @@ long keyctl_keyring_search(key_serial_t ringid,
 	if (ret < 0)
 		goto error;
 
-	description = strndup_user(_description, PAGE_SIZE);
+	description = strndup_user(_description, KEY_MAX_DESC_SIZE);
 	if (IS_ERR(description)) {
 		ret = PTR_ERR(description);
 		goto error;
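The keyctl_describe_key() rework above stops formatting everything into one fixed PAGE_SIZE buffer; it builds only the "type;uid;gid;perm;" prefix, then copies the key description (including its NUL) separately and only when the caller's buffer can hold the whole result. A rough userspace sketch of the same two-part assembly (plain C, asprintf standing in for kasprintf, invented sample values, not the kernel API):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* Hypothetical key attributes, for illustration only. */
	const char *type = "user", *description = "example:key";
	unsigned uid = 1000, gid = 1000, perm = 0x3f010000;

	char *infobuf;		/* "type;uid;gid;perm;" prefix, like kasprintf() in the patch */
	if (asprintf(&infobuf, "%s;%d;%d;%08x;", type, uid, gid, perm) < 0)
		return 1;

	size_t infolen = strlen(infobuf);
	size_t desclen = strlen(description);
	size_t total = infolen + desclen + 1;	/* +1 for the trailing NUL, like ret in the patch */

	/* The destination is only filled when it can hold everything. */
	char *buffer = malloc(total);
	if (buffer) {
		memcpy(buffer, infobuf, infolen);
		memcpy(buffer + infolen, description, desclen + 1);
		printf("%zu bytes: %s\n", total, buffer);
	}

	free(buffer);
	free(infobuf);
	return 0;
}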
@@ -546,7 +546,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
 	}
 
 	if (key->expiry && ctx->now.tv_sec >= key->expiry) {
-		ctx->result = ERR_PTR(-EKEYEXPIRED);
+		if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
+			ctx->result = ERR_PTR(-EKEYEXPIRED);
 		kleave(" = %d [expire]", ctx->skipped_ret);
 		goto skipped;
 	}
@@ -628,6 +629,10 @@ static bool search_nested_keyrings(struct key *keyring,
 	       ctx->index_key.type->name,
 	       ctx->index_key.description);
 
+#define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK)
+	BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
+	       (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
+
 	if (ctx->index_key.description)
 		ctx->index_key.desc_len = strlen(ctx->index_key.description);
 
@@ -637,7 +642,6 @@ static bool search_nested_keyrings(struct key *keyring,
 	if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE ||
 	    keyring_compare_object(keyring, &ctx->index_key)) {
 		ctx->skipped_ret = 2;
-		ctx->flags |= KEYRING_SEARCH_DO_STATE_CHECK;
 		switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
 		case 1:
 			goto found;
@@ -649,8 +653,6 @@ static bool search_nested_keyrings(struct key *keyring,
 	}
 
 	ctx->skipped_ret = 0;
-	if (ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK)
-		ctx->flags &= ~KEYRING_SEARCH_DO_STATE_CHECK;
 
 	/* Start processing a new keyring */
 descend_to_keyring:
@@ -516,6 +516,8 @@ struct key *request_key_and_link(struct key_type *type,
 		.match_data.cmp		= key_default_cmp,
 		.match_data.raw_data	= description,
 		.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+		.flags			= (KEYRING_SEARCH_DO_STATE_CHECK |
+					   KEYRING_SEARCH_SKIP_EXPIRED),
 	};
 	struct key *key;
 	key_ref_t key_ref;
@@ -249,6 +249,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
 		.match_data.cmp		= key_default_cmp,
 		.match_data.raw_data	= description,
 		.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+		.flags			= KEYRING_SEARCH_DO_STATE_CHECK,
 	};
 	struct key *authkey;
 	key_ref_t authkey_ref;
@@ -4790,6 +4790,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
 	SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),