Merge android-4.4@73a2b70 (v4.4.92) into msm-4.4

* refs/heads/tmp-73a2b70
  Linux 4.4.92
  ext4: don't allow encrypted operations without keys
  ext4: Don't clear SGID when inheriting ACLs
  ext4: fix data corruption for mmap writes
  sched/cpuset/pm: Fix cpuset vs. suspend-resume bugs
  nvme: protect against simultaneous shutdown invocations
  drm/i915/bios: ignore HDMI on port A
  brcmfmac: setup passive scan if requested by user-space
  uwb: ensure that endpoint is interrupt
  uwb: properly check kthread_run return value
  iio: adc: mcp320x: Fix oops on module unload
  iio: adc: mcp320x: Fix readout of negative voltages
  iio: ad7793: Fix the serial interface reset
  iio: core: Return error for failed read_reg
  staging: iio: ad7192: Fix - use the dedicated reset function avoiding dma from stack.
  iio: ad_sigma_delta: Implement a dedicated reset function
  iio: adc: twl4030: Disable the vusb3v1 rugulator in the error handling path of 'twl4030_madc_probe()'
  iio: adc: twl4030: Fix an error handling path in 'twl4030_madc_probe()'
  xhci: fix finding correct bus_state structure for USB 3.1 hosts
  USB: fix out-of-bounds in usb_set_configuration
  usb: Increase quirk delay for USB devices
  USB: core: harden cdc_parse_cdc_header
  USB: uas: fix bug in handling of alternate settings
  scsi: sd: Do not override max_sectors_kb sysfs setting
  iwlwifi: add workaround to disable wide channels in 5GHz
  HID: i2c-hid: allocate hid buffers for real worst case
  ftrace: Fix kmemleak in unregister_ftrace_graph
  stm class: Fix a use-after-free
  Drivers: hv: fcopy: restore correct transfer length
  driver core: platform: Don't read past the end of "driver_override" buffer
  ALSA: usx2y: Suppress kernel warning at page allocation failures
  ALSA: compress: Remove unused variable
  lsm: fix smack_inode_removexattr and xattr_getsecurity memleak
  USB: g_mass_storage: Fix deadlock when driver is unbound
  usb: gadget: mass_storage: set msg_registered after msg registered
  USB: devio: Don't corrupt user memory
  USB: dummy-hcd: Fix erroneous synchronization change
  USB: dummy-hcd: fix infinite-loop resubmission bug
  USB: dummy-hcd: fix connection failures (wrong speed)
  usb: pci-quirks.c: Corrected timeout values used in handshake
  ALSA: usb-audio: Check out-of-bounds access by corrupted buffer descriptor
  usb: renesas_usbhs: fix usbhsf_fifo_clear() for RX direction
  usb: renesas_usbhs: fix the BCLR setting condition for non-DCP pipe
  usb-storage: unusual_devs entry to fix write-access regression for Seagate external drives
  usb: gadget: udc: atmel: set vbus irqflags explicitly
  USB: gadgetfs: fix copy_to_user while holding spinlock
  USB: gadgetfs: Fix crash caused by inadequate synchronization
  usb: gadget: inode.c: fix unbalanced spin_lock in ep0_write
  ANDROID: binder: init desired_prio.sched_policy before use it
  BACKPORT: net: xfrm: support setting an output mark.
  UPSTREAM: xfrm: Only add l3mdev oif to dst lookups
  UPSTREAM: net: l3mdev: Add master device lookup by index
  Linux 4.4.91
  ttpci: address stringop overflow warning
  ALSA: au88x0: avoid theoretical uninitialized access
  ARM: remove duplicate 'const' annotations'
  IB/qib: fix false-postive maybe-uninitialized warning
  drivers: firmware: psci: drop duplicate const from psci_of_match
  libata: transport: Remove circular dependency at free time
  xfs: remove kmem_zalloc_greedy
  i2c: meson: fix wrong variable usage in meson_i2c_put_data
  md/raid10: submit bio directly to replacement disk
  rds: ib: add error handle
  iommu/io-pgtable-arm: Check for leaf entry before dereferencing it
  parisc: perf: Fix potential NULL pointer dereference
  netfilter: nfnl_cthelper: fix incorrect helper->expect_class_max
  exynos-gsc: Do not swap cb/cr for semi planar formats
  MIPS: IRQ Stack: Unwind IRQ stack onto task stack
  netfilter: invoke synchronize_rcu after set the _hook_ to NULL
  bridge: netlink: register netdevice before executing changelink
  mmc: sdio: fix alignment issue in struct sdio_func
  usb: plusb: Add support for PL-27A1
  team: fix memory leaks
  net/packet: check length in getsockopt() called with PACKET_HDRLEN
  net: core: Prevent from dereferencing null pointer when releasing SKB
  MIPS: Lantiq: Fix another request_mem_region() return code check
  ASoC: dapm: fix some pointer error handling
  usb: chipidea: vbus event may exist before starting gadget
  audit: log 32-bit socketcalls
  ASoC: dapm: handle probe deferrals
  partitions/efi: Fix integer overflow in GPT size calculation
  USB: serial: mos7840: fix control-message error handling
  USB: serial: mos7720: fix control-message error handling
  drm/amdkfd: fix improper return value on error
  IB/ipoib: Replace list_del of the neigh->list with list_del_init
  IB/ipoib: rtnl_unlock can not come after free_netdev
  IB/ipoib: Fix deadlock over vlan_mutex
  tty: goldfish: Fix a parameter of a call to free_irq
  ARM: 8635/1: nommu: allow enabling REMAP_VECTORS_TO_RAM
  iio: adc: hx711: Add DT binding for avia,hx711
  iio: adc: axp288: Drop bogus AXP288_ADC_TS_PIN_CTRL register modifications
  hwmon: (gl520sm) Fix overflows and crash seen when writing into limit attributes
  sh_eth: use correct name for ECMR_MPDE bit
  extcon: axp288: Use vbus-valid instead of -present to determine cable presence
  igb: re-assign hw address pointer on reset after PCI error
  MIPS: ralink: Fix incorrect assignment on ralink_soc
  MIPS: Ensure bss section ends on a long-aligned address
  ARM: dts: r8a7790: Use R-Car Gen 2 fallback binding for msiof nodes
  RDS: RDMA: Fix the composite message user notification
  GFS2: Fix reference to ERR_PTR in gfs2_glock_iter_next
  drm: bridge: add DT bindings for TI ths8135
  drm_fourcc: Fix DRM_FORMAT_MOD_LINEAR #define
  FROMLIST: tracing: Add support for preempt and irq enable/disable events
  FROMLIST: tracing: Prepare to add preempt and irq trace events
  ANDROID: binder: fix transaction leak.
  ANDROID: binder: Add tracing for binder priority inheritance.
  Linux 4.4.90
  fix xen_swiotlb_dma_mmap prototype
  swiotlb-xen: implement xen_swiotlb_dma_mmap callback
  video: fbdev: aty: do not leak uninitialized padding in clk to userspace
  KVM: VMX: use cmpxchg64
  ARM: pxa: fix the number of DMA requestor lines
  ARM: pxa: add the number of DMA requestor lines
  dmaengine: mmp-pdma: add number of requestors
  cxl: Fix driver use count
  KVM: VMX: remove WARN_ON_ONCE in kvm_vcpu_trigger_posted_interrupt
  KVM: VMX: do not change SN bit in vmx_update_pi_irte()
  timer/sysclt: Restrict timer migration sysctl values to 0 and 1
  gfs2: Fix debugfs glocks dump
  x86/fpu: Don't let userspace set bogus xcomp_bv
  btrfs: prevent to set invalid default subvolid
  btrfs: propagate error to btrfs_cmp_data_prepare caller
  btrfs: fix NULL pointer dereference from free_reloc_roots()
  PCI: Fix race condition with driver_override
  kvm: nVMX: Don't allow L2 to access the hardware CR8
  KVM: VMX: Do not BUG() on out-of-bounds guest IRQ
  arm64: fault: Route pte translation faults via do_translation_fault
  arm64: Make sure SPsel is always set
  seccomp: fix the usage of get/put_seccomp_filter() in seccomp_get_filter()
  bsg-lib: don't free job in bsg_prepare_job
  nl80211: check for the required netlink attributes presence
  vfs: Return -ENXIO for negative SEEK_HOLE / SEEK_DATA offsets
  SMB3: Don't ignore O_SYNC/O_DSYNC and O_DIRECT flags
  SMB: Validate negotiate (to protect against downgrade) even if signing off
  Fix SMB3.1.1 guest authentication to Samba
  powerpc/pseries: Fix parent_dn reference leak in add_dt_node()
  KEYS: prevent KEYCTL_READ on negative key
  KEYS: prevent creating a different user's keyrings
  KEYS: fix writing past end of user-supplied buffer in keyring_read()
  crypto: talitos - fix sha224
  crypto: talitos - Don't provide setkey for non hmac hashing algs.
  scsi: scsi_transport_iscsi: fix the issue that iscsi_if_rx doesn't parse nlmsg properly
  md/raid5: preserve STRIPE_ON_UNPLUG_LIST in break_stripe_batch_list
  md/raid5: fix a race condition in stripe batch
  tracing: Erase irqsoff trace with empty write
  tracing: Fix trace_pipe behavior for instance traces
  KVM: PPC: Book3S: Fix race and leak in kvm_vm_ioctl_create_spapr_tce()
  mac80211: flush hw_roc_start work before cancelling the ROC
  cifs: release auth_key.response for reconnect.
  f2fs: catch up to v4.14-rc1
  UPSTREAM: cpufreq: schedutil: use now as reference when aggregating shared policy requests
  ANDROID: add script to fetch android kernel config fragments
  f2fs: reorganize stat information
  f2fs: clean up flush/discard command namings
  f2fs: check in-memory sit version bitmap
  f2fs: check in-memory nat version bitmap
  f2fs: check in-memory block bitmap
  f2fs: introduce FI_ATOMIC_COMMIT
  f2fs: clean up with list_{first, last}_entry
  f2fs: return fs_trim if there is no candidate
  f2fs: avoid needless checkpoint in f2fs_trim_fs
  f2fs: relax async discard commands more
  f2fs: drop exist_data for inline_data when truncated to 0
  f2fs: don't allow encrypted operations without keys
  f2fs: show the max number of atomic operations
  f2fs: get io size bit from mount option
  f2fs: support IO alignment for DATA and NODE writes
  f2fs: add submit_bio tracepoint
  f2fs: reassign new segment for mode=lfs
  f2fs: fix a missing discard prefree segments
  f2fs: use rb_entry_safe
  f2fs: add a case of no need to read a page in write begin
  f2fs: fix a problem of using memory after free
  f2fs: remove unneeded condition
  f2fs: don't cache nat entry if out of memory
  f2fs: remove unused values in recover_fsync_data
  f2fs: support async discard based on v4.9
  f2fs: resolve op and op_flags confilcts
  f2fs: remove wrong backported codes
  FROMLIST: binder: fix use-after-free in binder_transaction()
  UPSTREAM: ipv6: fib: Unlink replaced routes from their nodes

Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>

Conflicts:
	fs/f2fs/crypto_key.c
	fs/f2fs/f2fs_crypto.h
	net/wireless/nl80211.c
	sound/usb/card.c

Change-Id: I742aeaec84c7892165976b7bea3e07bdd6881d93
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
commit b2465235ad
Author: Blagovest Kolenichev <bkolenichev@codeaurora.org>
Date:   2017-10-19 16:55:17 -07:00

228 files changed, 9859 insertions(+), 4212 deletions(-)

.gitignore

@ -112,3 +112,6 @@ all.config
# Kdevelop4
*.kdev4
# fetched Android config fragments
android/configs/android-*.cfg


@ -57,6 +57,15 @@ Contact: "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
Description:
Controls the issue rate of small discard commands.
What: /sys/fs/f2fs/<disk>/discard_granularity
Date: July 2017
Contact: "Chao Yu" <yuchao0@huawei.com>
Description:
Controls discard granularity of inner discard thread, inner thread
will not issue discards with size that is smaller than granularity.
The unit size is one block, now only support configuring in range
of [1, 512].
What: /sys/fs/f2fs/<disk>/max_victim_search
Date: January 2014
Contact: "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
@ -92,3 +101,47 @@ Date: October 2015
Contact: "Chao Yu" <chao2.yu@samsung.com>
Description:
Controls the count of nid pages to be readaheaded.
What: /sys/fs/f2fs/<disk>/dirty_nats_ratio
Date: January 2016
Contact: "Chao Yu" <chao2.yu@samsung.com>
Description:
Controls dirty nat entries ratio threshold, if current
ratio exceeds configured threshold, checkpoint will
be triggered for flushing dirty nat entries.
What: /sys/fs/f2fs/<disk>/lifetime_write_kbytes
Date: January 2016
Contact: "Shuoran Liu" <liushuoran@huawei.com>
Description:
Shows total written kbytes issued to disk.
What: /sys/fs/f2fs/<disk>/inject_rate
Date: May 2016
Contact: "Sheng Yong" <shengyong1@huawei.com>
Description:
Controls the injection rate.
What: /sys/fs/f2fs/<disk>/inject_type
Date: May 2016
Contact: "Sheng Yong" <shengyong1@huawei.com>
Description:
Controls the injection type.
What: /sys/fs/f2fs/<disk>/reserved_blocks
Date: June 2017
Contact: "Chao Yu" <yuchao0@huawei.com>
Description:
Controls current reserved blocks in system.
What: /sys/fs/f2fs/<disk>/gc_urgent
Date: August 2017
Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
Description:
Do background GC agressively
What: /sys/fs/f2fs/<disk>/gc_urgent_sleep_time
Date: August 2017
Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
Description:
Controls sleep time of GC urgent mode
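The gc_urgent, gc_urgent_sleep_time and discard_granularity entries above are plain runtime sysfs tunables. A minimal usage sketch, assuming an f2fs volume backed by a hypothetical disk sdb1 (illustration only, not part of the patch):

# force aggressive background GC, polling every 100 ms instead of the 500 ms default
echo 1 > /sys/fs/f2fs/sdb1/gc_urgent
echo 100 > /sys/fs/f2fs/sdb1/gc_urgent_sleep_time
# back to normal background GC behaviour
echo 0 > /sys/fs/f2fs/sdb1/gc_urgent
# only let the discard thread issue discards of at least 64 blocks (valid range is [1, 512])
echo 64 > /sys/fs/f2fs/sdb1/discard_granularity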


@ -0,0 +1,46 @@
THS8135 Video DAC
-----------------
This is the binding for Texas Instruments THS8135 Video DAC bridge.
Required properties:
- compatible: Must be "ti,ths8135"
Required nodes:
This device has two video ports. Their connections are modelled using the OF
graph bindings specified in Documentation/devicetree/bindings/graph.txt.
- Video port 0 for RGB input
- Video port 1 for VGA output
Example
-------
vga-bridge {
compatible = "ti,ths8135";
#address-cells = <1>;
#size-cells = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
vga_bridge_in: endpoint {
remote-endpoint = <&lcdc_out_vga>;
};
};
port@1 {
reg = <1>;
vga_bridge_out: endpoint {
remote-endpoint = <&vga_con_in>;
};
};
};
};


@ -0,0 +1,18 @@
* AVIA HX711 ADC chip for weight cells
Bit-banging driver
Required properties:
- compatible: Should be "avia,hx711"
- sck-gpios: Definition of the GPIO for the clock
- dout-gpios: Definition of the GPIO for data-out
See Documentation/devicetree/bindings/gpio/gpio.txt
- avdd-supply: Definition of the regulator used as analog supply
Example:
weight@0 {
compatible = "avia,hx711";
sck-gpios = <&gpio3 10 GPIO_ACTIVE_HIGH>;
dout-gpios = <&gpio0 7 GPIO_ACTIVE_HIGH>;
avdd-suppy = <&avdd>;
};


@ -32,6 +32,7 @@ asahi-kasei Asahi Kasei Corp.
atmel Atmel Corporation
auo AU Optronics Corporation
avago Avago Technologies
avia avia semiconductor
avic Shanghai AVIC Optoelectronics Co., Ltd.
axis Axis Communications AB
bosch Bosch Sensortec GmbH


@ -125,6 +125,7 @@ active_logs=%u Support configuring the number of active logs. In the
disable_ext_identify Disable the extension list configured by mkfs, so f2fs
does not aware of cold files such as media files.
inline_xattr Enable the inline xattrs feature.
noinline_xattr Disable the inline xattrs feature.
inline_data Enable the inline data feature: New created small(<~3.4k)
files can be written into inode block.
inline_dentry Enable the inline dir feature: data in new created
@ -157,6 +158,20 @@ data_flush Enable data flushing before checkpoint in order to
mode=%s Control block allocation mode which supports "adaptive"
and "lfs". In "lfs" mode, there should be no random
writes towards main area.
io_bits=%u Set the bit size of write IO requests. It should be set
with "mode=lfs".
usrquota Enable plain user disk quota accounting.
grpquota Enable plain group disk quota accounting.
prjquota Enable plain project quota accounting.
usrjquota=<file> Appoint specified file and type during mount, so that quota
grpjquota=<file> information can be properly updated during recovery flow,
prjjquota=<file> <quota file>: must be in root directory;
jqfmt=<quota type> <quota type>: [vfsold,vfsv0,vfsv1].
offusrjquota Turn off user journelled quota.
offgrpjquota Turn off group journelled quota.
offprjjquota Turn off project journelled quota.
quota Enable plain user disk quota accounting.
noquota Disable all plain disk quota option.
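A hypothetical mount invocation combining the new options (illustration only, not taken from this patch); per the table above, io_bits only makes sense together with mode=lfs:

# io_bits=5 requests write IOs aligned to 2^5 = 32 blocks, i.e. 128 KB with 4 KB blocks
mount -t f2fs -o mode=lfs,io_bits=5,usrquota,grpquota /dev/sdb1 /mnt/f2fs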
================================================================================
DEBUGFS ENTRIES
@ -202,6 +217,15 @@ Files in /sys/fs/f2fs/<devname>
gc_idle = 1 will select the Cost Benefit approach
& setting gc_idle = 2 will select the greedy approach.
gc_urgent This parameter controls triggering background GCs
urgently or not. Setting gc_urgent = 0 [default]
makes back to default behavior, while if it is set
to 1, background thread starts to do GC by given
gc_urgent_sleep_time interval.
gc_urgent_sleep_time This parameter controls sleep time for gc_urgent.
500 ms is set by default. See above gc_urgent.
reclaim_segments This parameter controls the number of prefree
segments to be reclaimed. If the number of prefree
segments is larger than the number of segments


@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 89
SUBLEVEL = 92
EXTRAVERSION =
NAME = Blurry Fish Butt


@ -0,0 +1,4 @@
#!/bin/sh
curl https://android.googlesource.com/kernel/configs/+archive/master/android-4.4.tar.gz | tar xzv
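The fetched fragments are normally folded into a device defconfig with the kernel's scripts/kconfig/merge_config.sh helper. A hedged sketch of that step; the fragment names android-base.cfg and android-recommended.cfg are the ones usually shipped in the kernel/configs repository and are assumptions here, not something added by this patch:

# merge the Android fragments on top of a defconfig, then resolve the result
ARCH=arm64 scripts/kconfig/merge_config.sh -m arch/arm64/configs/defconfig android-base.cfg android-recommended.cfg
make ARCH=arm64 olddefconfig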


@ -34,8 +34,7 @@ config PROCESSOR_ID
used instead of the auto-probing which utilizes the register.
config REMAP_VECTORS_TO_RAM
bool 'Install vectors to the beginning of RAM' if DRAM_BASE
depends on DRAM_BASE
bool 'Install vectors to the beginning of RAM'
help
The kernel needs to change the hardware exception vectors.
In nommu mode, the hardware exception vectors are normally

View file

@ -13,6 +13,7 @@
interrupts = <25>;
#dma-channels = <32>;
#dma-cells = <2>;
#dma-requests = <75>;
status = "okay";
};

View file

@ -12,6 +12,7 @@
interrupts = <25>;
#dma-channels = <32>;
#dma-cells = <2>;
#dma-requests = <100>;
status = "okay";
};


@ -1409,7 +1409,8 @@
};
msiof0: spi@e6e20000 {
compatible = "renesas,msiof-r8a7790";
compatible = "renesas,msiof-r8a7790",
"renesas,rcar-gen2-msiof";
reg = <0 0xe6e20000 0 0x0064>;
interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp0_clks R8A7790_CLK_MSIOF0>;
@ -1422,7 +1423,8 @@
};
msiof1: spi@e6e10000 {
compatible = "renesas,msiof-r8a7790";
compatible = "renesas,msiof-r8a7790",
"renesas,rcar-gen2-msiof";
reg = <0 0xe6e10000 0 0x0064>;
interrupts = <0 157 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_MSIOF1>;
@ -1435,7 +1437,8 @@
};
msiof2: spi@e6e00000 {
compatible = "renesas,msiof-r8a7790";
compatible = "renesas,msiof-r8a7790",
"renesas,rcar-gen2-msiof";
reg = <0 0xe6e00000 0 0x0064>;
interrupts = <0 158 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_MSIOF2>;
@ -1448,7 +1451,8 @@
};
msiof3: spi@e6c90000 {
compatible = "renesas,msiof-r8a7790";
compatible = "renesas,msiof-r8a7790",
"renesas,rcar-gen2-msiof";
reg = <0 0xe6c90000 0 0x0064>;
interrupts = <0 159 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_MSIOF3>;


@ -332,7 +332,7 @@ static void at91sam9_sdram_standby(void)
at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
}
static const struct of_device_id const ramc_ids[] __initconst = {
static const struct of_device_id ramc_ids[] __initconst = {
{ .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
{ .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
{ .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },


@ -33,7 +33,7 @@ struct bcm_kona_smc_data {
unsigned result;
};
static const struct of_device_id const bcm_kona_smc_ids[] __initconst = {
static const struct of_device_id bcm_kona_smc_ids[] __initconst = {
{.compatible = "brcm,kona-smc"},
{.compatible = "bcm,kona-smc"}, /* deprecated name */
{},


@ -346,7 +346,7 @@ static struct usb_ohci_pdata cns3xxx_usb_ohci_pdata = {
.power_off = csn3xxx_usb_power_off,
};
static const struct of_dev_auxdata const cns3xxx_auxdata[] __initconst = {
static const struct of_dev_auxdata cns3xxx_auxdata[] __initconst = {
{ "intel,usb-ehci", CNS3XXX_USB_BASE, "ehci-platform", &cns3xxx_usb_ehci_pdata },
{ "intel,usb-ohci", CNS3XXX_USB_OHCI_BASE, "ohci-platform", &cns3xxx_usb_ohci_pdata },
{ "cavium,cns3420-ahci", CNS3XXX_SATA2_BASE, "ahci", NULL },


@ -706,7 +706,7 @@ static struct omap_prcm_init_data scrm_data __initdata = {
};
#endif
static const struct of_device_id const omap_prcm_dt_match_table[] __initconst = {
static const struct of_device_id omap_prcm_dt_match_table[] __initconst = {
#ifdef CONFIG_SOC_AM33XX
{ .compatible = "ti,am3-prcm", .data = &am3_prm_data },
#endif


@ -559,7 +559,7 @@ struct i2c_init_data {
u8 hsscll_12;
};
static const struct i2c_init_data const omap4_i2c_timing_data[] __initconst = {
static const struct i2c_init_data omap4_i2c_timing_data[] __initconst = {
{
.load = 50,
.loadbits = 0x3,


@ -1203,6 +1203,7 @@ void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info)
static struct mmp_dma_platdata pxa_dma_pdata = {
.dma_channels = 0,
.nb_requestors = 0,
};
static struct resource pxa_dma_resource[] = {
@ -1231,8 +1232,9 @@ static struct platform_device pxa2xx_pxa_dma = {
.resource = pxa_dma_resource,
};
void __init pxa2xx_set_dmac_info(int nb_channels)
void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors)
{
pxa_dma_pdata.dma_channels = nb_channels;
pxa_dma_pdata.nb_requestors = nb_requestors;
pxa_register_device(&pxa2xx_pxa_dma, &pxa_dma_pdata);
}


@ -206,7 +206,7 @@ static int __init pxa25x_init(void)
register_syscore_ops(&pxa_irq_syscore_ops);
register_syscore_ops(&pxa2xx_mfp_syscore_ops);
pxa2xx_set_dmac_info(16);
pxa2xx_set_dmac_info(16, 40);
pxa_register_device(&pxa25x_device_gpio, &pxa25x_gpio_info);
ret = platform_add_devices(pxa25x_devices,
ARRAY_SIZE(pxa25x_devices));


@ -309,7 +309,7 @@ static int __init pxa27x_init(void)
if (!of_have_populated_dt()) {
pxa_register_device(&pxa27x_device_gpio,
&pxa27x_gpio_info);
pxa2xx_set_dmac_info(32);
pxa2xx_set_dmac_info(32, 75);
ret = platform_add_devices(devices,
ARRAY_SIZE(devices));
}


@ -450,7 +450,7 @@ static int __init pxa3xx_init(void)
if (of_have_populated_dt())
return 0;
pxa2xx_set_dmac_info(32);
pxa2xx_set_dmac_info(32, 100);
ret = platform_add_devices(devices, ARRAY_SIZE(devices));
if (ret)
return ret;


@ -204,7 +204,7 @@ static void __init spear_clockevent_init(int irq)
setup_irq(irq, &spear_timer_irq);
}
static const struct of_device_id const timer_of_match[] __initconst = {
static const struct of_device_id timer_of_match[] __initconst = {
{ .compatible = "st,spear-timer", },
{ },
};


@ -95,6 +95,6 @@ static inline int pxad_toggle_reserved_channel(int legacy_channel)
}
#endif
extern void __init pxa2xx_set_dmac_info(int nb_channels);
extern void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors);
#endif /* __PLAT_DMA_H */


@ -199,6 +199,7 @@ static struct dma_map_ops xen_swiotlb_dma_ops = {
.unmap_page = xen_swiotlb_unmap_page,
.dma_supported = xen_swiotlb_dma_supported,
.set_dma_mask = xen_swiotlb_set_dma_mask,
.mmap = xen_swiotlb_dma_mmap,
};
int __init xen_mm_init(void)


@ -485,6 +485,7 @@ ENTRY(kimage_vaddr)
* booted in EL1 or EL2 respectively.
*/
ENTRY(el2_setup)
msr SPsel, #1 // We want to use SP_EL{1,2}
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
b.ne 1f


@ -518,7 +518,7 @@ static const struct fault_info {
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
{ do_bad, SIGBUS, 0, "unknown 8" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },


@ -18,9 +18,24 @@
#include <irq.h>
#define IRQ_STACK_SIZE THREAD_SIZE
#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long))
extern void *irq_stack[NR_CPUS];
/*
* The highest address on the IRQ stack contains a dummy frame put down in
* genex.S (handle_int & except_vec_vi_handler) which is structured as follows:
*
* top ------------
* | task sp | <- irq_stack[cpu] + IRQ_STACK_START
* ------------
* | | <- First frame of IRQ context
* ------------
*
* task sp holds a copy of the task stack pointer where the struct pt_regs
* from exception entry can be found.
*/
static inline bool on_irq_stack(int cpu, unsigned long sp)
{
unsigned long low = (unsigned long)irq_stack[cpu];


@ -102,6 +102,7 @@ void output_thread_info_defines(void)
DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK);
DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
BLANK();
}


@ -216,9 +216,11 @@ NESTED(handle_int, PT_SIZE, sp)
beq t0, t1, 2f
/* Switch to IRQ stack */
li t1, _IRQ_STACK_SIZE
li t1, _IRQ_STACK_START
PTR_ADD sp, t0, t1
/* Save task's sp on IRQ stack so that unwinding can follow it */
LONG_S s1, 0(sp)
2:
jal plat_irq_dispatch
@ -326,9 +328,11 @@ NESTED(except_vec_vi_handler, 0, sp)
beq t0, t1, 2f
/* Switch to IRQ stack */
li t1, _IRQ_STACK_SIZE
li t1, _IRQ_STACK_START
PTR_ADD sp, t0, t1
/* Save task's sp on IRQ stack so that unwinding can follow it */
LONG_S s1, 0(sp)
2:
jalr v0


@ -483,31 +483,52 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
unsigned long pc,
unsigned long *ra)
{
unsigned long low, high, irq_stack_high;
struct mips_frame_info info;
unsigned long size, ofs;
struct pt_regs *regs;
int leaf;
extern void ret_from_irq(void);
extern void ret_from_exception(void);
if (!stack_page)
return 0;
/*
* If we reached the bottom of interrupt context,
* return saved pc in pt_regs.
* IRQ stacks start at IRQ_STACK_START
* task stacks at THREAD_SIZE - 32
*/
if (pc == (unsigned long)ret_from_irq ||
pc == (unsigned long)ret_from_exception) {
struct pt_regs *regs;
if (*sp >= stack_page &&
*sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
regs = (struct pt_regs *)*sp;
pc = regs->cp0_epc;
if (!user_mode(regs) && __kernel_text_address(pc)) {
*sp = regs->regs[29];
*ra = regs->regs[31];
return pc;
}
low = stack_page;
if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
high = stack_page + IRQ_STACK_START;
irq_stack_high = high;
} else {
high = stack_page + THREAD_SIZE - 32;
irq_stack_high = 0;
}
/*
* If we reached the top of the interrupt stack, start unwinding
* the interrupted task stack.
*/
if (unlikely(*sp == irq_stack_high)) {
unsigned long task_sp = *(unsigned long *)*sp;
/*
* Check that the pointer saved in the IRQ stack head points to
* something within the stack of the current task
*/
if (!object_is_on_stack((void *)task_sp))
return 0;
/*
* Follow pointer to tasks kernel stack frame where interrupted
* state was saved.
*/
regs = (struct pt_regs *)task_sp;
pc = regs->cp0_epc;
if (!user_mode(regs) && __kernel_text_address(pc)) {
*sp = regs->regs[29];
*ra = regs->regs[31];
return pc;
}
return 0;
}
@ -528,8 +549,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
if (leaf < 0)
return 0;
if (*sp < stack_page ||
*sp + info.frame_size > stack_page + THREAD_SIZE - 32)
if (*sp < low || *sp + info.frame_size > high)
return 0;
if (leaf)


@ -159,7 +159,7 @@ SECTIONS
* Force .bss to 64K alignment so that .bss..swapper_pg_dir
* gets that alignment. .sbss should be empty, so there will be
* no holes after __init_end. */
BSS_SECTION(0, 0x10000, 0)
BSS_SECTION(0, 0x10000, 8)
_end = . ;


@ -469,8 +469,8 @@ void __init ltq_soc_init(void)
panic("Failed to load xbar nodes from devicetree");
if (of_address_to_resource(np_xbar, 0, &res_xbar))
panic("Failed to get xbar resources");
if (request_mem_region(res_xbar.start, resource_size(&res_xbar),
res_xbar.name) < 0)
if (!request_mem_region(res_xbar.start, resource_size(&res_xbar),
res_xbar.name))
panic("Failed to get xbar resources");
ltq_xbar_membase = ioremap_nocache(res_xbar.start,


@ -144,5 +144,5 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
rt2880_pinmux_data = rt3883_pinmux_data;
ralink_soc == RT3883_SOC;
ralink_soc = RT3883_SOC;
}


@ -39,7 +39,7 @@
* the PDC INTRIGUE calls. This is done to eliminate bugs introduced
* in various PDC revisions. The code is much more maintainable
* and reliable this way vs having to debug on every version of PDC
* on every box.
* on every box.
*/
#include <linux/capability.h>
@ -195,8 +195,8 @@ static int perf_config(uint32_t *image_ptr);
static int perf_release(struct inode *inode, struct file *file);
static int perf_open(struct inode *inode, struct file *file);
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos);
static ssize_t perf_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void perf_start_counters(void);
static int perf_stop_counters(uint32_t *raddr);
@ -222,7 +222,7 @@ extern void perf_intrigue_disable_perf_counters (void);
/*
* configure:
*
* Configure the cpu with a given data image. First turn off the counters,
* Configure the cpu with a given data image. First turn off the counters,
* then download the image, then turn the counters back on.
*/
static int perf_config(uint32_t *image_ptr)
@ -234,7 +234,7 @@ static int perf_config(uint32_t *image_ptr)
error = perf_stop_counters(raddr);
if (error != 0) {
printk("perf_config: perf_stop_counters = %ld\n", error);
return -EINVAL;
return -EINVAL;
}
printk("Preparing to write image\n");
@ -242,7 +242,7 @@ printk("Preparing to write image\n");
error = perf_write_image((uint64_t *)image_ptr);
if (error != 0) {
printk("perf_config: DOWNLOAD = %ld\n", error);
return -EINVAL;
return -EINVAL;
}
printk("Preparing to start counters\n");
@ -254,7 +254,7 @@ printk("Preparing to start counters\n");
}
/*
* Open the device and initialize all of its memory. The device is only
* Open the device and initialize all of its memory. The device is only
* opened once, but can be "queried" by multiple processes that know its
* file descriptor.
*/
@ -298,8 +298,8 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
* called on the processor that the download should happen
* on.
*/
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos)
static ssize_t perf_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
int err;
size_t image_size;
@ -307,11 +307,11 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
uint32_t interface_type;
uint32_t test;
if (perf_processor_interface == ONYX_INTF)
if (perf_processor_interface == ONYX_INTF)
image_size = PCXU_IMAGE_SIZE;
else if (perf_processor_interface == CUDA_INTF)
else if (perf_processor_interface == CUDA_INTF)
image_size = PCXW_IMAGE_SIZE;
else
else
return -EFAULT;
if (!capable(CAP_SYS_ADMIN))
@ -331,22 +331,22 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
/* First check the machine type is correct for
the requested image */
if (((perf_processor_interface == CUDA_INTF) &&
(interface_type != CUDA_INTF)) ||
((perf_processor_interface == ONYX_INTF) &&
(interface_type != ONYX_INTF)))
if (((perf_processor_interface == CUDA_INTF) &&
(interface_type != CUDA_INTF)) ||
((perf_processor_interface == ONYX_INTF) &&
(interface_type != ONYX_INTF)))
return -EINVAL;
/* Next check to make sure the requested image
is valid */
if (((interface_type == CUDA_INTF) &&
if (((interface_type == CUDA_INTF) &&
(test >= MAX_CUDA_IMAGES)) ||
((interface_type == ONYX_INTF) &&
(test >= MAX_ONYX_IMAGES)))
((interface_type == ONYX_INTF) &&
(test >= MAX_ONYX_IMAGES)))
return -EINVAL;
/* Copy the image into the processor */
if (interface_type == CUDA_INTF)
if (interface_type == CUDA_INTF)
return perf_config(cuda_images[test]);
else
return perf_config(onyx_images[test]);
@ -360,7 +360,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
static void perf_patch_images(void)
{
#if 0 /* FIXME!! */
/*
/*
* NOTE: this routine is VERY specific to the current TLB image.
* If the image is changed, this routine might also need to be changed.
*/
@ -368,9 +368,9 @@ static void perf_patch_images(void)
extern void $i_dtlb_miss_2_0();
extern void PA2_0_iva();
/*
/*
* We can only use the lower 32-bits, the upper 32-bits should be 0
* anyway given this is in the kernel
* anyway given this is in the kernel
*/
uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
@ -378,21 +378,21 @@ static void perf_patch_images(void)
if (perf_processor_interface == ONYX_INTF) {
/* clear last 2 bytes */
onyx_images[TLBMISS][15] &= 0xffffff00;
onyx_images[TLBMISS][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[TLBMISS][17] = itlb_addr;
/* clear last 2 bytes */
onyx_images[TLBHANDMISS][15] &= 0xffffff00;
onyx_images[TLBHANDMISS][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[TLBHANDMISS][17] = itlb_addr;
/* clear last 2 bytes */
onyx_images[BIG_CPI][15] &= 0xffffff00;
onyx_images[BIG_CPI][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
@ -405,24 +405,24 @@ static void perf_patch_images(void)
} else if (perf_processor_interface == CUDA_INTF) {
/* Cuda interface */
cuda_images[TLBMISS][16] =
cuda_images[TLBMISS][16] =
(cuda_images[TLBMISS][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
cuda_images[TLBMISS][17] =
cuda_images[TLBMISS][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
cuda_images[TLBHANDMISS][16] =
cuda_images[TLBHANDMISS][16] =
(cuda_images[TLBHANDMISS][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
cuda_images[TLBHANDMISS][17] =
cuda_images[TLBHANDMISS][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
cuda_images[BIG_CPI][16] =
cuda_images[BIG_CPI][16] =
(cuda_images[BIG_CPI][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
cuda_images[BIG_CPI][17] =
cuda_images[BIG_CPI][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
} else {
@ -434,7 +434,7 @@ static void perf_patch_images(void)
/*
* ioctl routine
* All routines effect the processor that they are executed on. Thus you
* All routines effect the processor that they are executed on. Thus you
* must be running on the processor that you wish to change.
*/
@ -460,7 +460,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
/* copy out the Counters */
if (copy_to_user((void __user *)arg, raddr,
if (copy_to_user((void __user *)arg, raddr,
sizeof (raddr)) != 0) {
error = -EFAULT;
break;
@ -488,7 +488,7 @@ static const struct file_operations perf_fops = {
.open = perf_open,
.release = perf_release
};
static struct miscdevice perf_dev = {
MISC_DYNAMIC_MINOR,
PA_PERF_DEV,
@ -596,7 +596,7 @@ static int perf_stop_counters(uint32_t *raddr)
/* OR sticky2 (bit 1496) to counter2 bit 32 */
tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
raddr[2] = (uint32_t)tmp64;
/* Counter3 is bits 1497 to 1528 */
tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff;
/* OR sticky3 (bit 1529) to counter3 bit 32 */
@ -618,7 +618,7 @@ static int perf_stop_counters(uint32_t *raddr)
userbuf[22] = 0;
userbuf[23] = 0;
/*
/*
* Write back the zeroed bytes + the image given
* the read was destructive.
*/
@ -626,13 +626,13 @@ static int perf_stop_counters(uint32_t *raddr)
} else {
/*
* Read RDR-15 which contains the counters and sticky bits
* Read RDR-15 which contains the counters and sticky bits
*/
if (!perf_rdr_read_ubuf(15, userbuf)) {
return -13;
}
/*
/*
* Clear out the counters
*/
perf_rdr_clear(15);
@ -645,7 +645,7 @@ static int perf_stop_counters(uint32_t *raddr)
raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
}
return 0;
}
@ -683,7 +683,7 @@ static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
i = tentry->num_words;
while (i--) {
buffer[i] = 0;
}
}
/* Check for bits an even number of 64 */
if ((xbits = width & 0x03f) != 0) {
@ -809,18 +809,22 @@ static int perf_write_image(uint64_t *memaddr)
}
runway = ioremap_nocache(cpu_device->hpa.start, 4096);
if (!runway) {
pr_err("perf_write_image: ioremap failed!\n");
return -ENOMEM;
}
/* Merge intrigue bits into Runway STATUS 0 */
tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
runway + RUNWAY_STATUS);
/* Write RUNWAY DEBUG registers */
for (i = 0; i < 8; i++) {
__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
}
return 0;
return 0;
}
/*
@ -844,7 +848,7 @@ printk("perf_rdr_write\n");
perf_rdr_shift_out_U(rdr_num, buffer[i]);
} else {
perf_rdr_shift_out_W(rdr_num, buffer[i]);
}
}
}
printk("perf_rdr_write done\n");
}


@ -101,22 +101,17 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce *args)
{
struct kvmppc_spapr_tce_table *stt = NULL;
struct kvmppc_spapr_tce_table *siter;
long npages;
int ret = -ENOMEM;
int i;
/* Check this LIOBN hasn't been previously allocated */
list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
if (stt->liobn == args->liobn)
return -EBUSY;
}
npages = kvmppc_stt_npages(args->window_size);
stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
GFP_KERNEL);
if (!stt)
goto fail;
return ret;
stt->liobn = args->liobn;
stt->window_size = args->window_size;
@ -128,23 +123,36 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
goto fail;
}
kvm_get_kvm(kvm);
mutex_lock(&kvm->lock);
list_add(&stt->list, &kvm->arch.spapr_tce_tables);
/* Check this LIOBN hasn't been previously allocated */
ret = 0;
list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
if (siter->liobn == args->liobn) {
ret = -EBUSY;
break;
}
}
if (!ret)
ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
stt, O_RDWR | O_CLOEXEC);
if (ret >= 0) {
list_add(&stt->list, &kvm->arch.spapr_tce_tables);
kvm_get_kvm(kvm);
}
mutex_unlock(&kvm->lock);
return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
stt, O_RDWR | O_CLOEXEC);
if (ret >= 0)
return ret;
fail:
if (stt) {
for (i = 0; i < npages; i++)
if (stt->pages[i])
__free_page(stt->pages[i]);
fail:
for (i = 0; i < npages; i++)
if (stt->pages[i])
__free_page(stt->pages[i]);
kfree(stt);
}
kfree(stt);
return ret;
}


@ -225,8 +225,10 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
return -ENOENT;
dn = dlpar_configure_connector(drc_index, parent_dn);
if (!dn)
if (!dn) {
of_node_put(parent_dn);
return -ENOENT;
}
rc = dlpar_attach_node(dn);
if (rc)


@ -116,6 +116,11 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
xsave = &fpu->state.xsave;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
/* xcomp_bv must be 0 when using uncompacted format */
if (!ret && xsave->header.xcomp_bv)
ret = -EINVAL;
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
@ -126,6 +131,12 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
*/
memset(&xsave->header.reserved, 0, 48);
/*
* In case of failure, mark all states as init:
*/
if (ret)
fpstate_init(&fpu->state);
return ret;
}


@ -309,7 +309,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
fpu__drop(fpu);
if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
__copy_from_user(&env, buf, sizeof(env))) {
__copy_from_user(&env, buf, sizeof(env)) ||
(state_size > offsetof(struct xregs_state, header) &&
fpu->state.xsave.header.xcomp_bv)) {
fpstate_init(&fpu->state);
err = -1;
} else {


@ -2029,8 +2029,8 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
/* Allow posting non-urgent interrupts */
new.sn = 0;
} while (cmpxchg(&pi_desc->control, old.control,
new.control) != old.control);
} while (cmpxchg64(&pi_desc->control, old.control,
new.control) != old.control);
}
/*
* Switches to specified vcpu, until a matching vcpu_put(), but assumes
@ -4541,21 +4541,30 @@ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_SMP
if (vcpu->mode == IN_GUEST_MODE) {
struct vcpu_vmx *vmx = to_vmx(vcpu);
/*
* Currently, we don't support urgent interrupt,
* all interrupts are recognized as non-urgent
* interrupt, so we cannot post interrupts when
* 'SN' is set.
* The vector of interrupt to be delivered to vcpu had
* been set in PIR before this function.
*
* If the vcpu is in guest mode, it means it is
* running instead of being scheduled out and
* waiting in the run queue, and that's the only
* case when 'SN' is set currently, warning if
* 'SN' is set.
* Following cases will be reached in this block, and
* we always send a notification event in all cases as
* explained below.
*
* Case 1: vcpu keeps in non-root mode. Sending a
* notification event posts the interrupt to vcpu.
*
* Case 2: vcpu exits to root mode and is still
* runnable. PIR will be synced to vIRR before the
* next vcpu entry. Sending a notification event in
* this case has no effect, as vcpu is not in root
* mode.
*
* Case 3: vcpu exits to root mode and is blocked.
* vcpu_block() has already synced PIR to vIRR and
* never blocks vcpu if vIRR is not cleared. Therefore,
* a blocked vcpu here does not wait for any requested
* interrupts in PIR, and sending a notification event
* which has no effect is safe here.
*/
WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc));
apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
POSTED_INTR_VECTOR);
@ -9683,6 +9692,11 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
page_to_phys(vmx->nested.virtual_apic_page));
vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
} else {
#ifdef CONFIG_X86_64
exec_control |= CPU_BASED_CR8_LOAD_EXITING |
CPU_BASED_CR8_STORE_EXITING;
#endif
}
if (cpu_has_vmx_msr_bitmap() &&
@ -10691,8 +10705,8 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
/* set 'NV' to 'wakeup vector' */
new.nv = POSTED_INTR_WAKEUP_VECTOR;
} while (cmpxchg(&pi_desc->control, old.control,
new.control) != old.control);
} while (cmpxchg64(&pi_desc->control, old.control,
new.control) != old.control);
return 0;
}
@ -10723,8 +10737,8 @@ static void vmx_post_block(struct kvm_vcpu *vcpu)
/* set 'NV' to 'notification vector' */
new.nv = POSTED_INTR_VECTOR;
} while (cmpxchg(&pi_desc->control, old.control,
new.control) != old.control);
} while (cmpxchg64(&pi_desc->control, old.control,
new.control) != old.control);
if(vcpu->pre_pcpu != -1) {
spin_lock_irqsave(
@ -10755,7 +10769,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
struct kvm_lapic_irq irq;
struct kvm_vcpu *vcpu;
struct vcpu_data vcpu_info;
int idx, ret = -EINVAL;
int idx, ret = 0;
if (!kvm_arch_has_assigned_device(kvm) ||
!irq_remapping_cap(IRQ_POSTING_CAP))
@ -10763,7 +10777,12 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
idx = srcu_read_lock(&kvm->irq_srcu);
irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
BUG_ON(guest_irq >= irq_rt->nr_rt_entries);
if (guest_irq >= irq_rt->nr_rt_entries ||
hlist_empty(&irq_rt->map[guest_irq])) {
pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
guest_irq, irq_rt->nr_rt_entries);
goto out;
}
hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
if (e->type != KVM_IRQ_ROUTING_MSI)
@ -10793,12 +10812,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
if (set)
ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
else {
/* suppress notification event before unposting */
pi_set_sn(vcpu_to_pi_desc(vcpu));
else
ret = irq_set_vcpu_affinity(host_irq, NULL);
pi_clear_sn(vcpu_to_pi_desc(vcpu));
}
if (ret < 0) {
printk(KERN_INFO "%s: failed to update PI IRTE\n",


@ -147,7 +147,6 @@ static int bsg_create_job(struct device *dev, struct request *req)
failjob_rls_rqst_payload:
kfree(job->request_payload.sg_list);
failjob_rls_job:
kfree(job);
return -ENOMEM;
}


@ -293,7 +293,7 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
if (!gpt)
return NULL;
count = le32_to_cpu(gpt->num_partition_entries) *
count = (size_t)le32_to_cpu(gpt->num_partition_entries) *
le32_to_cpu(gpt->sizeof_partition_entry);
if (!count)
return NULL;
@ -352,7 +352,7 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
gpt_header **gpt, gpt_entry **ptes)
{
u32 crc, origcrc;
u64 lastlba;
u64 lastlba, pt_size;
if (!ptes)
return 0;
@ -434,13 +434,20 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
goto fail;
}
/* Sanity check partition table size */
pt_size = (u64)le32_to_cpu((*gpt)->num_partition_entries) *
le32_to_cpu((*gpt)->sizeof_partition_entry);
if (pt_size > KMALLOC_MAX_SIZE) {
pr_debug("GUID Partition Table is too large: %llu > %lu bytes\n",
(unsigned long long)pt_size, KMALLOC_MAX_SIZE);
goto fail;
}
if (!(*ptes = alloc_read_gpt_entries(state, *gpt)))
goto fail;
/* Check the GUID Partition Entry Array CRC */
crc = efi_crc32((const unsigned char *) (*ptes),
le32_to_cpu((*gpt)->num_partition_entries) *
le32_to_cpu((*gpt)->sizeof_partition_entry));
crc = efi_crc32((const unsigned char *) (*ptes), pt_size);
if (crc != le32_to_cpu((*gpt)->partition_entry_array_crc32)) {
pr_debug("GUID Partitition Entry Array CRC check failed.\n");


@ -1154,6 +1154,10 @@ static void binder_do_set_priority(struct task_struct *task,
task->pid, desired.prio,
to_kernel_prio(policy, priority));
trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
to_kernel_prio(policy, priority),
desired.prio);
/* Set the actual priority */
if (task->policy != policy || is_rt_policy(policy)) {
struct sched_param params;
@ -1185,7 +1189,7 @@ static void binder_transaction_priority(struct task_struct *task,
struct binder_priority node_prio,
bool inherit_rt)
{
struct binder_priority desired_prio;
struct binder_priority desired_prio = t->priority;
if (t->set_priority_called)
return;
@ -1197,9 +1201,6 @@ static void binder_transaction_priority(struct task_struct *task,
if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
desired_prio.prio = NICE_TO_PRIO(0);
desired_prio.sched_policy = SCHED_NORMAL;
} else {
desired_prio.prio = t->priority.prio;
desired_prio.sched_policy = t->priority.sched_policy;
}
if (node_prio.prio < t->priority.prio ||
@ -2102,6 +2103,26 @@ static void binder_send_failed_reply(struct binder_transaction *t,
}
}
/**
* binder_cleanup_transaction() - cleans up undelivered transaction
* @t: transaction that needs to be cleaned up
* @reason: reason the transaction wasn't delivered
* @error_code: error to return to caller (if synchronous call)
*/
static void binder_cleanup_transaction(struct binder_transaction *t,
const char *reason,
uint32_t error_code)
{
if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
binder_send_failed_reply(t, error_code);
} else {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered transaction %d, %s\n",
t->debug_id, reason);
binder_free_transaction(t);
}
}
/**
* binder_validate_object() - checks for a valid metadata object in a buffer.
* @buffer: binder_buffer that we're parsing.
@ -2744,6 +2765,48 @@ static bool binder_proc_transaction(struct binder_transaction *t,
return true;
}
/**
* binder_get_node_refs_for_txn() - Get required refs on node for txn
* @node: struct binder_node for which to get refs
* @proc: returns @node->proc if valid
* @error: if no @proc then returns BR_DEAD_REPLY
*
* User-space normally keeps the node alive when creating a transaction
* since it has a reference to the target. The local strong ref keeps it
* alive if the sending process dies before the target process processes
* the transaction. If the source process is malicious or has a reference
* counting bug, relying on the local strong ref can fail.
*
* Since user-space can cause the local strong ref to go away, we also take
* a tmpref on the node to ensure it survives while we are constructing
* the transaction. We also need a tmpref on the proc while we are
* constructing the transaction, so we take that here as well.
*
* Return: The target_node with refs taken or NULL if no @node->proc is NULL.
* Also sets @proc if valid. If the @node->proc is NULL indicating that the
* target proc has died, @error is set to BR_DEAD_REPLY
*/
static struct binder_node *binder_get_node_refs_for_txn(
struct binder_node *node,
struct binder_proc **procp,
uint32_t *error)
{
struct binder_node *target_node = NULL;
binder_node_inner_lock(node);
if (node->proc) {
target_node = node;
binder_inc_node_nilocked(node, 1, 0, NULL);
binder_inc_node_tmpref_ilocked(node);
node->proc->tmp_ref++;
*procp = node->proc;
} else
*error = BR_DEAD_REPLY;
binder_node_inner_unlock(node);
return target_node;
}
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@ -2846,43 +2909,35 @@ static void binder_transaction(struct binder_proc *proc,
ref = binder_get_ref_olocked(proc, tr->target.handle,
true);
if (ref) {
binder_inc_node(ref->node, 1, 0, NULL);
target_node = ref->node;
target_node = binder_get_node_refs_for_txn(
ref->node, &target_proc,
&return_error);
} else {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
}
binder_proc_unlock(proc);
if (target_node == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_invalid_target_handle;
}
} else {
mutex_lock(&context->context_mgr_node_lock);
target_node = context->binder_context_mgr_node;
if (target_node == NULL) {
if (target_node)
target_node = binder_get_node_refs_for_txn(
target_node, &target_proc,
&return_error);
else
return_error = BR_DEAD_REPLY;
mutex_unlock(&context->context_mgr_node_lock);
return_error_line = __LINE__;
goto err_no_context_mgr_node;
}
binder_inc_node(target_node, 1, 0, NULL);
mutex_unlock(&context->context_mgr_node_lock);
}
e->to_node = target_node->debug_id;
binder_node_lock(target_node);
target_proc = target_node->proc;
if (target_proc == NULL) {
binder_node_unlock(target_node);
return_error = BR_DEAD_REPLY;
if (!target_node) {
/*
* return_error is set above
*/
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_dead_binder;
}
binder_inner_proc_lock(target_proc);
target_proc->tmp_ref++;
binder_inner_proc_unlock(target_proc);
binder_node_unlock(target_node);
e->to_node = target_node->debug_id;
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
@ -3241,6 +3296,8 @@ static void binder_transaction(struct binder_proc *proc,
if (target_thread)
binder_thread_dec_tmpref(target_thread);
binder_proc_dec_tmpref(target_proc);
if (target_node)
binder_dec_node_tmpref(target_node);
/*
* write barrier to synchronize with initialization
* of log entry
@ -3260,6 +3317,8 @@ err_bad_parent:
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
if (target_node)
binder_dec_node_tmpref(target_node);
target_node = NULL;
t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
@ -3274,13 +3333,14 @@ err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
if (target_thread)
binder_thread_dec_tmpref(target_thread);
if (target_proc)
binder_proc_dec_tmpref(target_proc);
if (target_node)
if (target_node) {
binder_dec_node(target_node, 1, 0);
binder_dec_node_tmpref(target_node);
}
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
@ -4145,12 +4205,20 @@ retry:
if (put_user(cmd, (uint32_t __user *)ptr)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
binder_cleanup_transaction(t, "put_user failed",
BR_FAILED_REPLY);
return -EFAULT;
}
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr))) {
if (t_from)
binder_thread_dec_tmpref(t_from);
binder_cleanup_transaction(t, "copy_to_user failed",
BR_FAILED_REPLY);
return -EFAULT;
}
ptr += sizeof(tr);
@ -4220,15 +4288,9 @@ static void binder_release_work(struct binder_proc *proc,
struct binder_transaction *t;
t = container_of(w, struct binder_transaction, work);
if (t->buffer->target_node &&
!(t->flags & TF_ONE_WAY)) {
binder_send_failed_reply(t, BR_DEAD_REPLY);
} else {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered transaction %d\n",
t->debug_id);
binder_free_transaction(t);
}
binder_cleanup_transaction(t, "process died.",
BR_DEAD_REPLY);
} break;
case BINDER_WORK_RETURN_ERROR: {
struct binder_error *e = container_of(


@ -85,6 +85,30 @@ DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done);
DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done);
DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done);
TRACE_EVENT(binder_set_priority,
TP_PROTO(int proc, int thread, unsigned int old_prio,
unsigned int desired_prio, unsigned int new_prio),
TP_ARGS(proc, thread, old_prio, new_prio, desired_prio),
TP_STRUCT__entry(
__field(int, proc)
__field(int, thread)
__field(unsigned int, old_prio)
__field(unsigned int, new_prio)
__field(unsigned int, desired_prio)
),
TP_fast_assign(
__entry->proc = proc;
__entry->thread = thread;
__entry->old_prio = old_prio;
__entry->new_prio = new_prio;
__entry->desired_prio = desired_prio;
),
TP_printk("proc=%d thread=%d old=%d => new=%d desired=%d",
__entry->proc, __entry->thread, __entry->old_prio,
__entry->new_prio, __entry->desired_prio)
);
TRACE_EVENT(binder_wait_for_work,
TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo),
TP_ARGS(proc_work, transaction_stack, thread_todo),


@ -224,7 +224,6 @@ static DECLARE_TRANSPORT_CLASS(ata_port_class,
static void ata_tport_release(struct device *dev)
{
put_device(dev->parent);
}
/**
@ -284,7 +283,7 @@ int ata_tport_add(struct device *parent,
device_initialize(dev);
dev->type = &ata_port_type;
dev->parent = get_device(parent);
dev->parent = parent;
dev->release = ata_tport_release;
dev_set_name(dev, "ata%d", ap->print_id);
transport_setup_device(dev);
@ -348,7 +347,6 @@ static DECLARE_TRANSPORT_CLASS(ata_link_class,
static void ata_tlink_release(struct device *dev)
{
put_device(dev->parent);
}
/**
@ -410,7 +408,7 @@ int ata_tlink_add(struct ata_link *link)
int error;
device_initialize(dev);
dev->parent = get_device(&ap->tdev);
dev->parent = &ap->tdev;
dev->release = ata_tlink_release;
if (ata_is_host_link(link))
dev_set_name(dev, "link%d", ap->print_id);
@ -588,7 +586,6 @@ static DECLARE_TRANSPORT_CLASS(ata_dev_class,
static void ata_tdev_release(struct device *dev)
{
put_device(dev->parent);
}
/**
@ -661,7 +658,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
int error;
device_initialize(dev);
dev->parent = get_device(&link->tdev);
dev->parent = &link->tdev;
dev->release = ata_tdev_release;
if (ata_is_host_link(link))
dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);


@ -829,7 +829,8 @@ static ssize_t driver_override_store(struct device *dev,
struct platform_device *pdev = to_platform_device(dev);
char *driver_override, *old, *cp;
if (count > PATH_MAX)
/* We need to keep extra room for a newline */
if (count >= (PAGE_SIZE - 1))
return -EINVAL;
driver_override = kstrndup(buf, count, GFP_KERNEL);


@ -1749,9 +1749,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
req_ctx->swinit = 0;
} else {
desc->ptr[1] = zero_entry;
/* Indicate next op is not the first. */
req_ctx->first = 0;
}
/* Indicate next op is not the first. */
req_ctx->first = 0;
/* HMAC key */
if (ctx->keylen)
@ -2770,7 +2770,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
t_alg->algt.alg.hash.final = ahash_final;
t_alg->algt.alg.hash.finup = ahash_finup;
t_alg->algt.alg.hash.digest = ahash_digest;
t_alg->algt.alg.hash.setkey = ahash_setkey;
if (!strncmp(alg->cra_name, "hmac", 4))
t_alg->algt.alg.hash.setkey = ahash_setkey;
t_alg->algt.alg.hash.import = ahash_import;
t_alg->algt.alg.hash.export = ahash_export;


@ -168,7 +168,7 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
return ret;
}
vbus_attach = (pwr_stat & PS_STAT_VBUS_PRESENT);
vbus_attach = (pwr_stat & PS_STAT_VBUS_VALID);
if (!vbus_attach)
goto notify_otg;


@ -550,7 +550,7 @@ out_put_node:
return err;
}
static const struct of_device_id const psci_of_match[] __initconst = {
static const struct of_device_id psci_of_match[] __initconst = {
{ .compatible = "arm,psci", .data = psci_0_1_init},
{ .compatible = "arm,psci-0.2", .data = psci_0_2_init},
{ .compatible = "arm,psci-1.0", .data = psci_0_2_init},


@ -739,8 +739,10 @@ int kfd_wait_on_events(struct kfd_process *p,
struct kfd_event_data event_data;
if (copy_from_user(&event_data, &events[i],
sizeof(struct kfd_event_data)))
sizeof(struct kfd_event_data))) {
ret = -EFAULT;
goto fail;
}
ret = init_event_waiter(p, &event_waiters[i],
event_data.event_id, i);


@ -957,6 +957,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
if (port == PORT_A && is_dvi) {
DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
is_hdmi ? "/HDMI" : "");
is_dvi = false;
is_hdmi = false;
}
info->supports_dvi = is_dvi;
info->supports_hdmi = is_hdmi;
info->supports_dp = is_dp;


@ -540,7 +540,8 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
{
/* the worst case is computed from the set_report command with a
* reportID > 15 and the maximum report length */
int args_len = sizeof(__u8) + /* optional ReportID byte */
int args_len = sizeof(__u8) + /* ReportID */
sizeof(__u8) + /* optional ReportID byte */
sizeof(__u16) + /* data register */
sizeof(__u16) + /* size of the report */
report_size; /* report */


@ -155,6 +155,10 @@ static void fcopy_send_data(struct work_struct *dummy)
out_src = smsg_out;
break;
case WRITE_TO_FILE:
out_src = fcopy_transaction.fcopy_msg;
out_len = sizeof(struct hv_do_fcopy);
break;
default:
out_src = fcopy_transaction.fcopy_msg;
out_len = fcopy_transaction.recv_len;


@ -208,11 +208,13 @@ static ssize_t get_cpu_vid(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(cpu0_vid, S_IRUGO, get_cpu_vid, NULL);
#define VDD_FROM_REG(val) (((val) * 95 + 2) / 4)
#define VDD_TO_REG(val) clamp_val((((val) * 4 + 47) / 95), 0, 255)
#define VDD_FROM_REG(val) DIV_ROUND_CLOSEST((val) * 95, 4)
#define VDD_CLAMP(val) clamp_val(val, 0, 255 * 95 / 4)
#define VDD_TO_REG(val) DIV_ROUND_CLOSEST(VDD_CLAMP(val) * 4, 95)
#define IN_FROM_REG(val) ((val) * 19)
#define IN_TO_REG(val) clamp_val((((val) + 9) / 19), 0, 255)
#define IN_FROM_REG(val) ((val) * 19)
#define IN_CLAMP(val) clamp_val(val, 0, 255 * 19)
#define IN_TO_REG(val) DIV_ROUND_CLOSEST(IN_CLAMP(val), 19)
static ssize_t get_in_input(struct device *dev, struct device_attribute *attr,
char *buf)
@ -349,8 +351,13 @@ static SENSOR_DEVICE_ATTR(in4_max, S_IRUGO | S_IWUSR,
#define DIV_FROM_REG(val) (1 << (val))
#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) << (div))))
#define FAN_TO_REG(val, div) ((val) <= 0 ? 0 : \
clamp_val((480000 + ((val) << ((div)-1))) / ((val) << (div)), 1, 255))
#define FAN_BASE(div) (480000 >> (div))
#define FAN_CLAMP(val, div) clamp_val(val, FAN_BASE(div) / 255, \
FAN_BASE(div))
#define FAN_TO_REG(val, div) ((val) == 0 ? 0 : \
DIV_ROUND_CLOSEST(480000, \
FAN_CLAMP(val, div) << (div)))
static ssize_t get_fan_input(struct device *dev, struct device_attribute *attr,
char *buf)
@ -513,9 +520,9 @@ static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(fan1_off, S_IRUGO | S_IWUSR,
get_fan_off, set_fan_off);
#define TEMP_FROM_REG(val) (((val) - 130) * 1000)
#define TEMP_TO_REG(val) clamp_val(((((val) < 0 ? \
(val) - 500 : (val) + 500) / 1000) + 130), 0, 255)
#define TEMP_FROM_REG(val) (((val) - 130) * 1000)
#define TEMP_CLAMP(val) clamp_val(val, -130000, 125000)
#define TEMP_TO_REG(val) (DIV_ROUND_CLOSEST(TEMP_CLAMP(val), 1000) + 130)
static ssize_t get_temp_input(struct device *dev, struct device_attribute *attr,
char *buf)
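
The reworked gl520sm macros above clamp the requested physical value first and then convert with DIV_ROUND_CLOSEST, instead of hand-rolled half-adjust rounding on a possibly out-of-range value. A minimal userspace sketch of the VDD pair, with local stand-ins for the kernel's clamp_val() and DIV_ROUND_CLOSEST() helpers (assumed equivalent for the non-negative values used here):

/* Userspace sketch of the reworked gl520sm conversion macros.
 * clamp_val() and DIV_ROUND_CLOSEST() are redefined locally to
 * mirror the kernel helpers for non-negative arguments. */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))
#define clamp_val(v, lo, hi)	((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

#define VDD_FROM_REG(val)	DIV_ROUND_CLOSEST((val) * 95, 4)
#define VDD_CLAMP(val)		clamp_val(val, 0, 255 * 95 / 4)
#define VDD_TO_REG(val)		DIV_ROUND_CLOSEST(VDD_CLAMP(val) * 4, 95)

int main(void)
{
	int mv = 1250;			/* requested VDD in mV */
	int reg = VDD_TO_REG(mv);	/* -> 53, the nearest register code */
	int back = VDD_FROM_REG(reg);	/* -> 1259 mV, nearest representable */

	printf("mv=%d reg=%d back=%d\n", mv, reg, back);

	/* out-of-range requests are clamped instead of wrapping */
	printf("VDD_TO_REG(10000) = %d (max 255)\n", VDD_TO_REG(10000));
	return 0;
}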


@ -1065,7 +1065,7 @@ void stm_source_unregister_device(struct stm_source_data *data)
stm_source_link_drop(src);
device_destroy(&stm_source_class, src->dev.devt);
device_unregister(&src->dev);
}
EXPORT_SYMBOL_GPL(stm_source_unregister_device);


@ -175,7 +175,7 @@ static void meson_i2c_put_data(struct meson_i2c *i2c, char *buf, int len)
wdata1 |= *buf++ << ((i - 4) * 8);
writel(wdata0, i2c->regs + REG_TOK_WDATA0);
writel(wdata0, i2c->regs + REG_TOK_WDATA1);
writel(wdata1, i2c->regs + REG_TOK_WDATA1);
dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__,
wdata0, wdata1, len);


@ -257,7 +257,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
unsigned int vref_mv)
{
struct ad7793_state *st = iio_priv(indio_dev);
int i, ret = -1;
int i, ret;
unsigned long long scale_uv;
u32 id;
@ -266,7 +266,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
return ret;
/* reset the serial interface */
ret = spi_write(st->sd.spi, (u8 *)&ret, sizeof(ret));
ret = ad_sd_reset(&st->sd, 32);
if (ret < 0)
goto out;
usleep_range(500, 2000); /* Wait for at least 500us */


@ -177,6 +177,34 @@ out:
}
EXPORT_SYMBOL_GPL(ad_sd_read_reg);
/**
* ad_sd_reset() - Reset the serial interface
*
* @sigma_delta: The sigma delta device
* @reset_length: Number of SCLKs with DIN = 1
*
* Returns 0 on success, an error code otherwise.
**/
int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
unsigned int reset_length)
{
uint8_t *buf;
unsigned int size;
int ret;
size = DIV_ROUND_UP(reset_length, 8);
buf = kcalloc(size, sizeof(*buf), GFP_KERNEL);
if (!buf)
return -ENOMEM;
memset(buf, 0xff, size);
ret = spi_write(sigma_delta->spi, buf, size);
kfree(buf);
return ret;
}
EXPORT_SYMBOL_GPL(ad_sd_reset);
static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
unsigned int mode, unsigned int channel)
{


@ -28,8 +28,6 @@
#include <linux/iio/driver.h>
#define AXP288_ADC_EN_MASK 0xF1
#define AXP288_ADC_TS_PIN_GPADC 0xF2
#define AXP288_ADC_TS_PIN_ON 0xF3
enum axp288_adc_id {
AXP288_ADC_TS,
@ -123,16 +121,6 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
return IIO_VAL_INT;
}
static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
unsigned long address)
{
/* channels other than GPADC do not need to switch TS pin */
if (address != AXP288_GP_ADC_H)
return 0;
return regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
}
static int axp288_adc_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@ -143,16 +131,7 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
chan->address)) {
dev_err(&indio_dev->dev, "GPADC mode\n");
ret = -EINVAL;
break;
}
ret = axp288_adc_read_channel(val, chan->address, info->regmap);
if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
chan->address))
dev_err(&indio_dev->dev, "TS pin restore\n");
break;
default:
ret = -EINVAL;
@ -162,15 +141,6 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
return ret;
}
static int axp288_adc_set_state(struct regmap *regmap)
{
/* ADC should be always enabled for internal FG to function */
if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
return -EIO;
return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
}
static const struct iio_info axp288_adc_iio_info = {
.read_raw = &axp288_adc_read_raw,
.driver_module = THIS_MODULE,
@ -199,7 +169,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
* Set ADC to enabled state at all time, including system suspend.
* otherwise internal fuel gauge functionality may be affected.
*/
ret = axp288_adc_set_state(axp20x->regmap);
ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
if (ret) {
dev_err(&pdev->dev, "unable to enable ADC device\n");
return ret;


@ -17,6 +17,8 @@
* MCP3204
* MCP3208
* ------------
* 13 bit converter
* MCP3301
*
* Datasheet can be found here:
* http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf mcp3001
@ -96,7 +98,7 @@ static int mcp320x_channel_to_tx_data(int device_index,
}
static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
bool differential, int device_index)
bool differential, int device_index, int *val)
{
int ret;
@ -117,19 +119,25 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
switch (device_index) {
case mcp3001:
return (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
*val = (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
return 0;
case mcp3002:
case mcp3004:
case mcp3008:
return (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
*val = (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
return 0;
case mcp3201:
return (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
*val = (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
return 0;
case mcp3202:
case mcp3204:
case mcp3208:
return (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
*val = (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
return 0;
case mcp3301:
return sign_extend32((adc->rx_buf[0] & 0x1f) << 8 | adc->rx_buf[1], 12);
*val = sign_extend32((adc->rx_buf[0] & 0x1f) << 8
| adc->rx_buf[1], 12);
return 0;
default:
return -EINVAL;
}
@ -150,12 +158,10 @@ static int mcp320x_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = mcp320x_adc_conversion(adc, channel->address,
channel->differential, device_index);
channel->differential, device_index, val);
if (ret < 0)
goto out;
*val = ret;
ret = IIO_VAL_INT;
break;
@ -304,6 +310,7 @@ static int mcp320x_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &mcp320x_info;
spi_set_drvdata(spi, indio_dev);
chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data];
indio_dev->channels = chip_info->channels;
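
With the old interface, mcp320x_adc_conversion() returned the sample directly, so the sign-extended MCP3301 readings (which can legitimately be negative) were indistinguishable from negative error codes; the fix above moves the sample into an output parameter and keeps the return value for status only. A minimal sketch of that pattern, with hypothetical names rather than the driver's:

/* Sketch of the "status return + out-parameter" pattern the mcp320x
 * fix switches to.  Names are hypothetical, not the driver's. */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* Sign-extend the low 13 bits, as done for the MCP3301 sample. */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

/* Returns 0 on success and writes the (possibly negative) sample to *val,
 * so callers can no longer mistake a negative reading for an error code. */
static int read_sample(uint8_t hi, uint8_t lo, int *val)
{
	if (!val)
		return -EINVAL;
	*val = sign_extend32((hi & 0x1f) << 8 | lo, 12);
	return 0;
}

int main(void)
{
	int sample;

	if (!read_sample(0x1f, 0xff, &sample))	/* all-ones raw code */
		printf("sample = %d\n", sample);	/* -1, a valid reading */
	return 0;
}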


@ -866,8 +866,10 @@ static int twl4030_madc_probe(struct platform_device *pdev)
/* Enable 3v1 bias regulator for MADC[3:6] */
madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1");
if (IS_ERR(madc->usb3v1))
return -ENODEV;
if (IS_ERR(madc->usb3v1)) {
ret = -ENODEV;
goto err_i2c;
}
ret = regulator_enable(madc->usb3v1);
if (ret)
@ -876,11 +878,13 @@ static int twl4030_madc_probe(struct platform_device *pdev)
ret = iio_device_register(iio_dev);
if (ret) {
dev_err(&pdev->dev, "could not register iio device\n");
goto err_i2c;
goto err_usb3v1;
}
return 0;
err_usb3v1:
regulator_disable(madc->usb3v1);
err_i2c:
twl4030_madc_set_current_generator(madc, 0, 0);
err_current_generator:


@ -221,8 +221,10 @@ static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
ret = indio_dev->info->debugfs_reg_access(indio_dev,
indio_dev->cached_reg_addr,
0, &val);
if (ret)
if (ret) {
dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
return ret;
}
len = snprintf(buf, sizeof(buf), "0x%X\n", val);


@ -7097,7 +7097,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
unsigned long flags;
while (wait) {
unsigned long shadow;
unsigned long shadow = 0;
int cstart, previ = -1;
/*


@ -1239,7 +1239,7 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from path/mc list */
list_del(&neigh->list);
list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
} else {
np = &neigh->hnext;
@ -1406,7 +1406,7 @@ void ipoib_neigh_free(struct ipoib_neigh *neigh)
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from parent list */
list_del(&neigh->list);
list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
return;
} else {
@ -1491,7 +1491,7 @@ void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from parent list */
list_del(&neigh->list);
list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
} else {
np = &neigh->hnext;
@ -1533,7 +1533,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from path/mc list */
list_del(&neigh->list);
list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
}
}


@ -160,11 +160,11 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
out:
up_write(&ppriv->vlan_rwsem);
rtnl_unlock();
if (result)
free_netdev(priv->dev);
rtnl_unlock();
return result;
}
@ -185,7 +185,6 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
unregister_netdevice(priv->dev);
list_del(&priv->list);
dev = priv->dev;
break;
@ -193,6 +192,11 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
}
up_write(&ppriv->vlan_rwsem);
if (dev) {
ipoib_dbg(ppriv, "delete child vlan %s\n", dev->name);
unregister_netdevice(dev);
}
rtnl_unlock();
if (dev) {


@ -492,8 +492,12 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
pte |= ARM_LPAE_PTE_NSTABLE;
__arm_lpae_set_pte(ptep, pte, cfg);
} else {
} else if (!iopte_leaf(pte, lvl)) {
cptep = iopte_deref(pte, data);
} else {
/* We require an unmap first */
WARN_ON(!selftest_running);
return -EEXIST;
}
/* Rinse, repeat */


@ -1414,11 +1414,24 @@ retry_write:
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
cb = blk_check_plugged(raid10_unplug, mddev,
sizeof(*plug));
if (cb)
plug = container_of(cb, struct raid10_plug_cb,
cb);
else
plug = NULL;
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
if (plug) {
bio_list_add(&plug->pending, mbio);
plug->pending_cnt++;
} else {
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
}
spin_unlock_irqrestore(&conf->device_lock, flags);
if (!mddev_check_plugged(mddev))
if (!plug)
md_wakeup_thread(mddev->thread);
}
}


@ -818,6 +818,14 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
spin_unlock(&head->batch_head->batch_lock);
goto unlock_out;
}
/*
* We must assign batch_head of this stripe within the
* batch_lock, otherwise clear_batch_ready of batch head
* stripe could clear BATCH_READY bit of this stripe and
* this stripe->batch_head doesn't get assigned, which
* could confuse clear_batch_ready for this stripe
*/
sh->batch_head = head->batch_head;
/*
* at this point, head's BATCH_READY could be cleared, but we
@ -825,8 +833,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
*/
list_add(&sh->batch_list, &head->batch_list);
spin_unlock(&head->batch_head->batch_lock);
sh->batch_head = head->batch_head;
} else {
head->batch_head = head;
sh->batch_head = head->batch_head;
@ -4258,7 +4264,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
(1 << STRIPE_PREREAD_ACTIVE) |
(1 << STRIPE_DEGRADED)),
(1 << STRIPE_DEGRADED) |
(1 << STRIPE_ON_UNPLUG_LIST)),
head_sh->state & (1 << STRIPE_INSYNC));
sh->check_state = head_sh->check_state;


@ -56,11 +56,11 @@
by Nathan Laredo <laredo@gnu.org> */
int av7110_debiwrite(struct av7110 *av7110, u32 config,
int addr, u32 val, int count)
int addr, u32 val, unsigned int count)
{
struct saa7146_dev *dev = av7110->dev;
if (count <= 0 || count > 32764) {
if (count > 32764) {
printk("%s: invalid count %d\n", __func__, count);
return -1;
}
@ -78,12 +78,12 @@ int av7110_debiwrite(struct av7110 *av7110, u32 config,
return 0;
}
u32 av7110_debiread(struct av7110 *av7110, u32 config, int addr, int count)
u32 av7110_debiread(struct av7110 *av7110, u32 config, int addr, unsigned int count)
{
struct saa7146_dev *dev = av7110->dev;
u32 result = 0;
if (count > 32764 || count <= 0) {
if (count > 32764) {
printk("%s: invalid count %d\n", __func__, count);
return 0;
}


@ -377,14 +377,14 @@ extern int av7110_fw_request(struct av7110 *av7110, u16 *request_buf,
/* DEBI (saa7146 data extension bus interface) access */
extern int av7110_debiwrite(struct av7110 *av7110, u32 config,
int addr, u32 val, int count);
int addr, u32 val, unsigned int count);
extern u32 av7110_debiread(struct av7110 *av7110, u32 config,
int addr, int count);
int addr, unsigned int count);
/* DEBI during interrupt */
/* single word writes */
static inline void iwdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
static inline void iwdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
av7110_debiwrite(av7110, config, addr, val, count);
}
@ -397,7 +397,7 @@ static inline void mwdebi(struct av7110 *av7110, u32 config, int addr,
av7110_debiwrite(av7110, config, addr, 0, count);
}
static inline u32 irdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
static inline u32 irdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
u32 res;
@ -408,7 +408,7 @@ static inline u32 irdebi(struct av7110 *av7110, u32 config, int addr, u32 val, i
}
/* DEBI outside interrupts, only for count <= 4! */
static inline void wdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
static inline void wdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
unsigned long flags;
@ -417,7 +417,7 @@ static inline void wdebi(struct av7110 *av7110, u32 config, int addr, u32 val, i
spin_unlock_irqrestore(&av7110->debilock, flags);
}
static inline u32 rdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
static inline u32 rdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
unsigned long flags;
u32 res;


@ -849,9 +849,7 @@ int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb,
if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_NV61) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_NV21) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M))
swap(addr->cb, addr->cr);


@ -176,6 +176,10 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
kernel = false;
}
/*
* Increment driver use count. Enables global TLBIs for hash
* and callbacks to handle the segment table
*/
cxl_ctx_get();
if ((rc = cxl_attach_process(ctx, kernel, wed , 0))) {


@ -94,7 +94,6 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
pr_devel("afu_open pe: %i\n", ctx->pe);
file->private_data = ctx;
cxl_ctx_get();
/* indicate success */
rc = 0;
@ -205,11 +204,18 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
ctx->pid = get_task_pid(current, PIDTYPE_PID);
ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
/*
* Increment driver use count. Enables global TLBIs for hash
* and callbacks to handle the segment table
*/
cxl_ctx_get();
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
if ((rc = cxl_attach_process(ctx, false, work.work_element_descriptor,
amr))) {
afu_release_irqs(ctx, ctx);
cxl_ctx_put();
goto out;
}


@ -277,7 +277,7 @@ static void sdio_release_func(struct device *dev)
sdio_free_func_cis(func);
kfree(func->info);
kfree(func->tmpbuf);
kfree(func);
}
@ -292,6 +292,16 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
if (!func)
return ERR_PTR(-ENOMEM);
/*
* allocate buffer separately to make sure it's properly aligned for
* DMA usage (incl. 64 bit DMA)
*/
func->tmpbuf = kmalloc(4, GFP_KERNEL);
if (!func->tmpbuf) {
kfree(func);
return ERR_PTR(-ENOMEM);
}
func->card = card;
device_initialize(&func->dev);


@ -7658,6 +7658,11 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
/* In case of PCI error, adapter lose its HW address
* so we should re-assign it here.
*/
hw->hw_addr = adapter->io_addr;
igb_reset(adapter);
wr32(E1000_WUS, ~0);
result = PCI_ERS_RESULT_RECOVERED;


@ -339,7 +339,7 @@ enum FELIC_MODE_BIT {
ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000,
ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000,
ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
ECMR_MPDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
ECMR_RTM = 0x00000010, ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004,
ECMR_DM = 0x00000002, ECMR_PRM = 0x00000001,
};


@ -2343,8 +2343,10 @@ start_again:
hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
TEAM_CMD_OPTIONS_GET);
if (!hdr)
if (!hdr) {
nlmsg_free(skb);
return -EMSGSIZE;
}
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
goto nla_put_failure;
@ -2611,8 +2613,10 @@ start_again:
hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
TEAM_CMD_PORT_LIST_GET);
if (!hdr)
if (!hdr) {
nlmsg_free(skb);
return -EMSGSIZE;
}
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
goto nla_put_failure;


@ -364,7 +364,7 @@ config USB_NET_NET1080
optionally with LEDs that indicate traffic
config USB_NET_PLUSB
tristate "Prolific PL-2301/2302/25A1 based cables"
tristate "Prolific PL-2301/2302/25A1/27A1 based cables"
# if the handshake/init/reset problems, from original 'plusb',
# are ever resolved ... then remove "experimental"
depends on USB_USBNET


@ -102,7 +102,7 @@ static int pl_reset(struct usbnet *dev)
}
static const struct driver_info prolific_info = {
.description = "Prolific PL-2301/PL-2302/PL-25A1",
.description = "Prolific PL-2301/PL-2302/PL-25A1/PL-27A1",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT,
/* some PL-2302 versions seem to fail usb_set_interface() */
.reset = pl_reset,
@ -139,6 +139,17 @@ static const struct usb_device_id products [] = {
* Host-to-Host Cable
*/
.driver_info = (unsigned long) &prolific_info,
},
/* super speed cables */
{
USB_DEVICE(0x067b, 0x27a1), /* PL-27A1, no eeprom
* also: goobay Active USB 3.0
* Data Link,
* Unitek Y-3501
*/
.driver_info = (unsigned long) &prolific_info,
},
{ }, // END
@ -158,5 +169,5 @@ static struct usb_driver plusb_driver = {
module_usb_driver(plusb_driver);
MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1 USB Host to Host Link Driver");
MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1/27A1 USB Host to Host Link Driver");
MODULE_LICENSE("GPL");


@ -1990,6 +1990,10 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
elength = 1;
goto next_desc;
}
if ((buflen < elength) || (elength < 3)) {
dev_err(&intf->dev, "invalid descriptor buffer length\n");
break;
}
if (buffer[1] != USB_DT_CS_INTERFACE) {
dev_err(&intf->dev, "skipping garbage\n");
goto next_desc;
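
The added check rejects a descriptor whose claimed length overruns the remaining buffer or is shorter than the three mandatory header bytes, before any field is read. A small sketch of walking length-prefixed records with those bounds checks (hypothetical names, not the USB core parser):

/* Sketch of walking length-prefixed descriptors with the bounds
 * checks the cdc_parse_cdc_header fix adds; names are hypothetical. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static void walk_descriptors(const uint8_t *buf, size_t buflen)
{
	while (buflen >= 3) {
		uint8_t elength = buf[0];

		/* reject lengths that overrun the buffer or are too
		 * short to contain the three mandatory header bytes */
		if (buflen < elength || elength < 3) {
			fprintf(stderr, "invalid descriptor length %u\n", elength);
			return;
		}
		printf("type=0x%02x subtype=0x%02x len=%u\n",
		       buf[1], buf[2], elength);
		buf += elength;
		buflen -= elength;
	}
}

int main(void)
{
	/* two well-formed records followed by one that claims 0x10
	 * bytes while only 3 remain */
	const uint8_t descs[] = {
		5, 0x24, 0x00, 0x10, 0x01,
		4, 0x24, 0x06, 0x00,
		0x10, 0x24, 0x02,
	};

	walk_descriptors(descs, sizeof(descs));
	return 0;
}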


@ -876,7 +876,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
eth_broadcast_addr(params_le->bssid);
params_le->bss_type = DOT11_BSSTYPE_ANY;
params_le->scan_type = 0;
params_le->scan_type = BRCMF_SCANTYPE_ACTIVE;
params_le->channel_num = 0;
params_le->nprobes = cpu_to_le32(-1);
params_le->active_time = cpu_to_le32(-1);
@ -884,12 +884,9 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
params_le->home_time = cpu_to_le32(-1);
memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
/* if request is null exit so it will be all channel broadcast scan */
if (!request)
return;
n_ssids = request->n_ssids;
n_channels = request->n_channels;
/* Copy channel array if applicable */
brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
n_channels);
@ -926,16 +923,8 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
ptr += sizeof(ssid_le);
}
} else {
brcmf_dbg(SCAN, "Broadcast scan %p\n", request->ssids);
if ((request->ssids) && request->ssids->ssid_len) {
brcmf_dbg(SCAN, "SSID %s len=%d\n",
params_le->ssid_le.SSID,
request->ssids->ssid_len);
params_le->ssid_le.SSID_len =
cpu_to_le32(request->ssids->ssid_len);
memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
request->ssids->ssid_len);
}
brcmf_dbg(SCAN, "Performing passive scan\n");
params_le->scan_type = BRCMF_SCANTYPE_PASSIVE;
}
/* Adding mask to channel numbers */
params_le->channel_num =


@ -45,6 +45,11 @@
#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
/* scan type definitions */
#define BRCMF_SCANTYPE_DEFAULT 0xFF
#define BRCMF_SCANTYPE_ACTIVE 0
#define BRCMF_SCANTYPE_PASSIVE 1
/* primary (ie tx) key */
#define BRCMF_PRIMARY_KEY (1 << 1)
#define DOT11_BSSTYPE_ANY 2


@ -73,6 +73,7 @@
/* NVM offsets (in words) definitions */
enum wkp_nvm_offsets {
/* NVM HW-Section offset (in words) definitions */
SUBSYSTEM_ID = 0x0A,
HW_ADDR = 0x15,
/* NVM SW-Section offset (in words) definitions */
@ -257,13 +258,12 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 * const nvm_ch_flags,
bool lar_supported)
bool lar_supported, bool no_wide_in_5ghz)
{
int ch_idx;
int n_channels = 0;
struct ieee80211_channel *channel;
u16 ch_flags;
bool is_5ghz;
int num_of_ch, num_2ghz_channels;
const u8 *nvm_chan;
@ -278,12 +278,20 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
}
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
bool is_5ghz = (ch_idx >= num_2ghz_channels);
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
if (ch_idx >= num_2ghz_channels &&
!data->sku_cap_band_52GHz_enable)
if (is_5ghz && !data->sku_cap_band_52GHz_enable)
continue;
/* workaround to disable wide channels in 5GHz */
if (no_wide_in_5ghz && is_5ghz) {
ch_flags &= ~(NVM_CHANNEL_40MHZ |
NVM_CHANNEL_80MHZ |
NVM_CHANNEL_160MHZ);
}
if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) {
/*
* Channels might become valid later if lar is
@ -303,8 +311,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
n_channels++;
channel->hw_value = nvm_chan[ch_idx];
channel->band = (ch_idx < num_2ghz_channels) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
channel->band = is_5ghz ?
IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
channel->center_freq =
ieee80211_channel_to_frequency(
channel->hw_value, channel->band);
@ -316,7 +324,6 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
* is not used in mvm, and is used for backwards compatibility
*/
channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
/* don't put limitations in case we're using LAR */
if (!lar_supported)
@ -405,7 +412,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 *ch_section,
u8 tx_chains, u8 rx_chains, bool lar_supported)
u8 tx_chains, u8 rx_chains, bool lar_supported,
bool no_wide_in_5ghz)
{
int n_channels;
int n_used = 0;
@ -414,12 +422,14 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
n_channels = iwl_init_channel_map(
dev, cfg, data,
&ch_section[NVM_CHANNELS], lar_supported);
&ch_section[NVM_CHANNELS], lar_supported,
no_wide_in_5ghz);
else
n_channels = iwl_init_channel_map(
dev, cfg, data,
&ch_section[NVM_CHANNELS_FAMILY_8000],
lar_supported);
lar_supported,
no_wide_in_5ghz);
sband = &data->bands[IEEE80211_BAND_2GHZ];
sband->band = IEEE80211_BAND_2GHZ;
@ -582,6 +592,39 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
#define IWL_4165_DEVICE_ID 0x5501
static bool
iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg,
const __le16 *nvm_hw)
{
/*
* Workaround a bug in Indonesia SKUs where the regulatory in
* some 7000-family OTPs erroneously allow wide channels in
* 5GHz. To check for Indonesia, we take the SKU value from
* bits 1-4 in the subsystem ID and check if it is either 5 or
* 9. In those cases, we need to force-disable wide channels
* in 5GHz otherwise the FW will throw a sysassert when we try
* to use them.
*/
if (cfg->device_family == IWL_DEVICE_FAMILY_7000) {
/*
* Unlike the other sections in the NVM, the hw
* section uses big-endian.
*/
u16 subsystem_id = be16_to_cpup((const __be16 *)nvm_hw
+ SUBSYSTEM_ID);
u8 sku = (subsystem_id & 0x1e) >> 1;
if (sku == 5 || sku == 9) {
IWL_DEBUG_EEPROM(dev,
"disabling wide channels in 5GHz (0x%0x %d)\n",
subsystem_id, sku);
return true;
}
}
return false;
}
struct iwl_nvm_data *
iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
const __le16 *nvm_hw, const __le16 *nvm_sw,
@ -591,6 +634,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
u32 mac_addr0, u32 mac_addr1, u32 hw_id)
{
struct iwl_nvm_data *data;
bool no_wide_in_5ghz = iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw);
u32 sku;
u32 radio_cfg;
u16 lar_config;
@ -657,7 +701,8 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
iwl_set_hw_address(cfg, data, nvm_hw);
iwl_init_sbands(dev, cfg, data, nvm_sw,
tx_chains, rx_chains, lar_fw_supported);
tx_chains, rx_chains, lar_fw_supported,
no_wide_in_5ghz);
} else {
u16 lar_offset = data->nvm_version < 0xE39 ?
NVM_LAR_OFFSET_FAMILY_8000_OLD :
@ -673,7 +718,8 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_sbands(dev, cfg, data, regulatory,
tx_chains, rx_chains,
lar_fw_supported && data->lar_enabled);
lar_fw_supported && data->lar_enabled,
no_wide_in_5ghz);
}
data->calib_version = 255;
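
The new iwl_nvm_no_wide_in_5ghz() helper keys off the SKU field packed into bits 1-4 of the big-endian subsystem ID word in the NVM HW section. A small stand-alone sketch of just that bit extraction (illustrative names and values, not the driver's):

/* Sketch of the Indonesia-SKU check added by the iwlwifi workaround:
 * the SKU sits in bits 1-4 of the subsystem ID word. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool no_wide_in_5ghz(uint16_t subsystem_id)
{
	uint8_t sku = (subsystem_id & 0x1e) >> 1;

	return sku == 5 || sku == 9;	/* the two affected SKU values */
}

int main(void)
{
	/* e.g. an OTP word of 0x010a -> sku = (0x0a >> 1) = 5 */
	printf("0x010a: %s\n", no_wide_in_5ghz(0x010a) ? "disable wide 5GHz" : "ok");
	printf("0x0104: %s\n", no_wide_in_5ghz(0x0104) ? "disable wide 5GHz" : "ok");
	return 0;
}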


@ -14,6 +14,7 @@
#ifndef _NVME_H
#define _NVME_H
#include <linux/mutex.h>
#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
@ -62,6 +63,7 @@ struct nvme_dev {
struct work_struct reset_work;
struct work_struct probe_work;
struct work_struct scan_work;
struct mutex shutdown_lock;
char name[12];
char serial[20];
char model[40];


@ -2954,6 +2954,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
nvme_dev_list_remove(dev);
mutex_lock(&dev->shutdown_lock);
if (pci_is_enabled(to_pci_dev(dev->dev))) {
nvme_freeze_queues(dev);
csts = readl(&dev->bar->csts);
@ -2972,6 +2973,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
for (i = dev->queue_count - 1; i >= 0; i--)
nvme_clear_queue(dev->queues[i]);
mutex_unlock(&dev->shutdown_lock);
}
static void nvme_dev_remove(struct nvme_dev *dev)
@ -3328,6 +3330,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&dev->namespaces);
INIT_WORK(&dev->reset_work, nvme_reset_work);
mutex_init(&dev->shutdown_lock);
dev->dev = get_device(&pdev->dev);
pci_set_drvdata(pdev, dev);


@ -522,7 +522,7 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
char *driver_override, *old = pdev->driver_override, *cp;
char *driver_override, *old, *cp;
/* We need to keep extra room for a newline */
if (count >= (PAGE_SIZE - 1))
@ -536,12 +536,15 @@ static ssize_t driver_override_store(struct device *dev,
if (cp)
*cp = '\0';
device_lock(dev);
old = pdev->driver_override;
if (strlen(driver_override)) {
pdev->driver_override = driver_override;
} else {
kfree(driver_override);
pdev->driver_override = NULL;
}
device_unlock(dev);
kfree(old);
@ -552,8 +555,12 @@ static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
ssize_t len;
return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
device_lock(dev);
len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
device_unlock(dev);
return len;
}
static DEVICE_ATTR_RW(driver_override);
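
Both driver_override_store() and driver_override_show() now take device_lock() around the pdev->driver_override pointer, so a concurrent store can no longer free the string a reader is still formatting. A minimal sketch of the same swap-under-lock pattern using an ordinary pthread mutex (hypothetical names, not the PCI core API):

/* Sketch of the locking pattern introduced for driver_override:
 * the pointer is only swapped and only read while holding a lock,
 * and the old string is freed after the swap. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char *override;		/* protected by lock */

static void override_store(const char *buf)
{
	char *new = buf && *buf ? strdup(buf) : NULL;
	char *old;

	pthread_mutex_lock(&lock);
	old = override;			/* read the old value under the lock */
	override = new;
	pthread_mutex_unlock(&lock);

	free(old);			/* safe: no reader can still hold it */
}

static void override_show(char *out, size_t len)
{
	pthread_mutex_lock(&lock);
	snprintf(out, len, "%s\n", override ? override : "");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	char buf[64];

	override_store("vfio-pci");
	override_show(buf, sizeof(buf));
	printf("%s", buf);

	override_store("");		/* clears the override */
	override_show(buf, sizeof(buf));
	printf("%s", buf);
	return 0;
}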


@ -3697,7 +3697,7 @@ iscsi_if_rx(struct sk_buff *skb)
uint32_t group;
nlh = nlmsg_hdr(skb);
if (nlh->nlmsg_len < sizeof(*nlh) ||
if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
skb->len < nlh->nlmsg_len) {
break;
}


@ -2800,8 +2800,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_write_same(sdkp, buffer);
}
sdkp->first_scan = 0;
/*
* We now have all cache related info, determine how we deal
* with flush requests.
@ -2816,7 +2814,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
/*
* Use the device's preferred I/O size for reads and writes
* Determine the device's preferred I/O size for reads and writes
* unless the reported value is unreasonably small, large, or
* garbage.
*/
@ -2830,8 +2828,19 @@ static int sd_revalidate_disk(struct gendisk *disk)
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS);
/* Combine with controller limits */
q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
/* Do not exceed controller limit */
rw_max = min(rw_max, queue_max_hw_sectors(q));
/*
* Only update max_sectors if previously unset or if the current value
* exceeds the capabilities of the hardware.
*/
if (sdkp->first_scan ||
q->limits.max_sectors > q->limits.max_dev_sectors ||
q->limits.max_sectors > q->limits.max_hw_sectors)
q->limits.max_sectors = rw_max;
sdkp->first_scan = 0;
set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
sd_config_write_same(sdkp);


@ -205,11 +205,9 @@ static int ad7192_setup(struct ad7192_state *st,
struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi);
unsigned long long scale_uv;
int i, ret, id;
u8 ones[6];
/* reset the serial interface */
memset(&ones, 0xFF, 6);
ret = spi_write(st->sd.spi, &ones, 6);
ret = ad_sd_reset(&st->sd, 48);
if (ret < 0)
goto out;
usleep_range(500, 1000); /* Wait for at least 500us */
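
The ad7192 and ad7793 hunks replace an spi_write() of an on-stack buffer with the ad_sd_reset() helper shown earlier, which builds the all-ones pattern in a kmalloc'd buffer so the transfer never DMA-maps stack memory. A userspace sketch of only the buffer-sizing idea (local helper names, not kernel API); per the diffs, the ad7192 path clocks out 48 ones and the ad7793 path 32:

/* Userspace sketch of what ad_sd_reset() does: build a heap buffer of
 * 0xff bytes long enough to clock out 'reset_length' ones, instead of
 * writing from the stack (which is not DMA-safe for spi_write()). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int build_reset_buffer(unsigned int reset_length,
			      uint8_t **buf, size_t *size)
{
	*size = DIV_ROUND_UP(reset_length, 8);	/* SCLKs -> bytes */
	*buf = malloc(*size);
	if (!*buf)
		return -1;
	memset(*buf, 0xff, *size);		/* DIN held high */
	return 0;
}

int main(void)
{
	uint8_t *buf;
	size_t size;

	/* 48 ones (ad7192) -> 6 bytes; 32 ones (ad7793) -> 4 bytes */
	if (!build_reset_buffer(48, &buf, &size)) {
		printf("reset buffer: %zu bytes of 0xff\n", size);
		free(buf);
	}
	return 0;
}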


@ -300,7 +300,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
return 0;
err_tty_register_device_failed:
free_irq(irq, pdev);
free_irq(irq, qtty);
err_request_irq_failed:
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)


@ -134,9 +134,9 @@ void ci_handle_vbus_change(struct ci_hdrc *ci)
if (!ci->is_otg)
return;
if (hw_read_otgsc(ci, OTGSC_BSV))
if (hw_read_otgsc(ci, OTGSC_BSV) && !ci->vbus_active)
usb_gadget_vbus_connect(&ci->gadget);
else
else if (!hw_read_otgsc(ci, OTGSC_BSV) && ci->vbus_active)
usb_gadget_vbus_disconnect(&ci->gadget);
}
@ -175,14 +175,21 @@ static void ci_handle_id_switch(struct ci_hdrc *ci)
ci_role_stop(ci);
if (role == CI_ROLE_GADGET)
if (role == CI_ROLE_GADGET &&
IS_ERR(ci->platdata->vbus_extcon.edev))
/*
* wait vbus lower than OTGSC_BSV before connecting
* to host
* Wait vbus lower than OTGSC_BSV before connecting
* to host. If connecting status is from an external
* connector instead of register, we don't need to
* care vbus on the board, since it will not affect
* external connector status.
*/
hw_wait_vbus_lower_bsv(ci);
ci_role_start(ci, role);
/* vbus change may have already occurred */
if (role == CI_ROLE_GADGET)
ci_handle_vbus_change(ci);
}
}
/**


@ -609,15 +609,23 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
} else if (header->bDescriptorType ==
USB_DT_INTERFACE_ASSOCIATION) {
struct usb_interface_assoc_descriptor *d;
d = (struct usb_interface_assoc_descriptor *)header;
if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) {
dev_warn(ddev,
"config %d has an invalid interface association descriptor of length %d, skipping\n",
cfgno, d->bLength);
continue;
}
if (iad_num == USB_MAXIADS) {
dev_warn(ddev, "found more Interface "
"Association Descriptors "
"than allocated for in "
"configuration %d\n", cfgno);
} else {
config->intf_assoc[iad_num] =
(struct usb_interface_assoc_descriptor
*)header;
config->intf_assoc[iad_num] = d;
iad_num++;
}
@ -818,7 +826,7 @@ int usb_get_configuration(struct usb_device *dev)
}
if (dev->quirks & USB_QUIRK_DELAY_INIT)
msleep(100);
msleep(200);
result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
bigbuffer, length);


@ -1417,7 +1417,11 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
totlen += isopkt[u].length;
}
u *= sizeof(struct usb_iso_packet_descriptor);
uurb->buffer_length = totlen;
if (totlen <= uurb->buffer_length)
uurb->buffer_length = totlen;
else
WARN_ONCE(1, "uurb->buffer_length is too short %d vs %d",
totlen, uurb->buffer_length);
break;
default:


@ -4774,7 +4774,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
goto loop;
if (udev->quirks & USB_QUIRK_DELAY_INIT)
msleep(1000);
msleep(2000);
/* consecutive bus-powered hubs aren't reliable; they can
* violate the voltage drop budget. if the new child has


@ -306,8 +306,6 @@ struct fsg_common {
struct completion thread_notifier;
struct task_struct *thread_task;
/* Callback functions. */
const struct fsg_operations *ops;
/* Gadget's private data. */
void *private_data;
@ -2539,6 +2537,7 @@ static void handle_exception(struct fsg_common *common)
static int fsg_main_thread(void *common_)
{
struct fsg_common *common = common_;
int i;
/*
* Allow the thread to be killed by a signal, but set the signal mask
@ -2600,21 +2599,16 @@ static int fsg_main_thread(void *common_)
common->thread_task = NULL;
spin_unlock_irq(&common->lock);
if (!common->ops || !common->ops->thread_exits
|| common->ops->thread_exits(common) < 0) {
int i;
/* Eject media from all LUNs */
down_write(&common->filesem);
for (i = 0; i < ARRAY_SIZE(common->luns); --i) {
struct fsg_lun *curlun = common->luns[i];
if (!curlun || !fsg_lun_is_open(curlun))
continue;
down_write(&common->filesem);
for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
struct fsg_lun *curlun = common->luns[i];
if (curlun && fsg_lun_is_open(curlun))
fsg_lun_close(curlun);
curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
}
up_write(&common->filesem);
}
up_write(&common->filesem);
/* Let fsg_unbind() know the thread has exited */
complete_and_exit(&common->thread_notifier, 0);
@ -2820,13 +2814,6 @@ void fsg_common_remove_luns(struct fsg_common *common)
}
EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
void fsg_common_set_ops(struct fsg_common *common,
const struct fsg_operations *ops)
{
common->ops = ops;
}
EXPORT_SYMBOL_GPL(fsg_common_set_ops);
void fsg_common_free_buffers(struct fsg_common *common)
{
_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);


@ -60,17 +60,6 @@ struct fsg_module_parameters {
struct fsg_common;
/* FSF callback functions */
struct fsg_operations {
/*
* Callback function to call when thread exits. If no
* callback is set or it returns value lower then zero MSF
* will force eject all LUNs it operates on (including those
* marked as non-removable or with prevent_medium_removal flag
* set).
*/
int (*thread_exits)(struct fsg_common *common);
};
struct fsg_lun_opts {
struct config_group group;
struct fsg_lun *lun;
@ -141,9 +130,6 @@ void fsg_common_remove_lun(struct fsg_lun *lun);
void fsg_common_remove_luns(struct fsg_common *common);
void fsg_common_set_ops(struct fsg_common *common,
const struct fsg_operations *ops);
int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
unsigned int id, const char *name,
const char **name_pfx);


@ -27,7 +27,7 @@
#include <linux/mmu_context.h>
#include <linux/aio.h>
#include <linux/uio.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/moduleparam.h>
@ -116,6 +116,7 @@ enum ep0_state {
struct dev_data {
spinlock_t lock;
atomic_t count;
int udc_usage;
enum ep0_state state; /* P: lock */
struct usb_gadgetfs_event event [N_EVENT];
unsigned ev_next;
@ -512,9 +513,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
INIT_WORK(&priv->work, ep_user_copy_worker);
schedule_work(&priv->work);
}
spin_unlock(&epdata->dev->lock);
usb_ep_free_request(ep, req);
spin_unlock(&epdata->dev->lock);
put_ep(epdata);
}
@ -938,9 +939,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
struct usb_request *req = dev->req;
if ((retval = setup_req (ep, req, 0)) == 0) {
++dev->udc_usage;
spin_unlock_irq (&dev->lock);
retval = usb_ep_queue (ep, req, GFP_KERNEL);
spin_lock_irq (&dev->lock);
--dev->udc_usage;
}
dev->state = STATE_DEV_CONNECTED;
@ -982,11 +985,14 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
retval = -EIO;
else {
len = min (len, (size_t)dev->req->actual);
// FIXME don't call this with the spinlock held ...
++dev->udc_usage;
spin_unlock_irq(&dev->lock);
if (copy_to_user (buf, dev->req->buf, len))
retval = -EFAULT;
else
retval = len;
spin_lock_irq(&dev->lock);
--dev->udc_usage;
clean_req (dev->gadget->ep0, dev->req);
/* NOTE userspace can't yet choose to stall */
}
@ -1130,6 +1136,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
retval = setup_req (dev->gadget->ep0, dev->req, len);
if (retval == 0) {
dev->state = STATE_DEV_CONNECTED;
++dev->udc_usage;
spin_unlock_irq (&dev->lock);
if (copy_from_user (dev->req->buf, buf, len))
retval = -EFAULT;
@ -1140,10 +1147,10 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
dev->gadget->ep0, dev->req,
GFP_KERNEL);
}
spin_lock_irq(&dev->lock);
--dev->udc_usage;
if (retval < 0) {
spin_lock_irq (&dev->lock);
clean_req (dev->gadget->ep0, dev->req);
spin_unlock_irq (&dev->lock);
} else
retval = len;
@ -1240,9 +1247,21 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
struct usb_gadget *gadget = dev->gadget;
long ret = -ENOTTY;
if (gadget->ops->ioctl)
spin_lock_irq(&dev->lock);
if (dev->state == STATE_DEV_OPENED ||
dev->state == STATE_DEV_UNBOUND) {
/* Not bound to a UDC */
} else if (gadget->ops->ioctl) {
++dev->udc_usage;
spin_unlock_irq(&dev->lock);
ret = gadget->ops->ioctl (gadget, code, value);
spin_lock_irq(&dev->lock);
--dev->udc_usage;
}
spin_unlock_irq(&dev->lock);
return ret;
}
@ -1460,10 +1479,12 @@ delegate:
if (value < 0)
break;
++dev->udc_usage;
spin_unlock (&dev->lock);
value = usb_ep_queue (gadget->ep0, dev->req,
GFP_KERNEL);
spin_lock (&dev->lock);
--dev->udc_usage;
if (value < 0) {
clean_req (gadget->ep0, dev->req);
break;
@ -1487,8 +1508,12 @@ delegate:
req->length = value;
req->zero = value < w_length;
++dev->udc_usage;
spin_unlock (&dev->lock);
value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
spin_lock(&dev->lock);
--dev->udc_usage;
spin_unlock(&dev->lock);
if (value < 0) {
DBG (dev, "ep_queue --> %d\n", value);
req->status = 0;
@ -1515,21 +1540,24 @@ static void destroy_ep_files (struct dev_data *dev)
/* break link to FS */
ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
list_del_init (&ep->epfiles);
spin_unlock_irq (&dev->lock);
dentry = ep->dentry;
ep->dentry = NULL;
parent = d_inode(dentry->d_parent);
/* break link to controller */
mutex_lock(&ep->lock);
if (ep->state == STATE_EP_ENABLED)
(void) usb_ep_disable (ep->ep);
ep->state = STATE_EP_UNBOUND;
usb_ep_free_request (ep->ep, ep->req);
ep->ep = NULL;
mutex_unlock(&ep->lock);
wake_up (&ep->wait);
put_ep (ep);
spin_unlock_irq (&dev->lock);
/* break link to dcache */
mutex_lock (&parent->i_mutex);
d_delete (dentry);
@ -1600,6 +1628,11 @@ gadgetfs_unbind (struct usb_gadget *gadget)
spin_lock_irq (&dev->lock);
dev->state = STATE_DEV_UNBOUND;
while (dev->udc_usage > 0) {
spin_unlock_irq(&dev->lock);
usleep_range(1000, 2000);
spin_lock_irq(&dev->lock);
}
spin_unlock_irq (&dev->lock);
destroy_ep_files (dev);


@ -107,15 +107,6 @@ static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
static unsigned long msg_registered;
static void msg_cleanup(void);
static int msg_thread_exits(struct fsg_common *common)
{
msg_cleanup();
return 0;
}
static int msg_do_config(struct usb_configuration *c)
{
struct fsg_opts *opts;
@ -154,9 +145,6 @@ static struct usb_configuration msg_config_driver = {
static int msg_bind(struct usb_composite_dev *cdev)
{
static const struct fsg_operations ops = {
.thread_exits = msg_thread_exits,
};
struct fsg_opts *opts;
struct fsg_config config;
int status;
@ -173,8 +161,6 @@ static int msg_bind(struct usb_composite_dev *cdev)
if (status)
goto fail;
fsg_common_set_ops(opts->common, &ops);
status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
if (status)
goto fail_set_cdev;
@ -210,7 +196,6 @@ static int msg_bind(struct usb_composite_dev *cdev)
usb_composite_overwrite_options(cdev, &coverwrite);
dev_info(&cdev->gadget->dev,
DRIVER_DESC ", version: " DRIVER_VERSION "\n");
set_bit(0, &msg_registered);
return 0;
fail_otg_desc:
@ -261,9 +246,8 @@ static int __init msg_init(void)
}
module_init(msg_init);
static void msg_cleanup(void)
static void __exit msg_cleanup(void)
{
if (test_and_clear_bit(0, &msg_registered))
usb_composite_unregister(&msg_driver);
usb_composite_unregister(&msg_driver);
}
module_exit(msg_cleanup);


@ -28,6 +28,8 @@
#include <asm/gpio.h>
#include "atmel_usba_udc.h"
#define USBA_VBUS_IRQFLAGS (IRQF_ONESHOT \
| IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING)
#ifdef CONFIG_USB_GADGET_DEBUG_FS
#include <linux/debugfs.h>
@ -2185,7 +2187,7 @@ static int usba_udc_probe(struct platform_device *pdev)
IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(&pdev->dev,
gpio_to_irq(udc->vbus_pin), NULL,
usba_vbus_irq_thread, IRQF_ONESHOT,
usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS,
"atmel_usba_udc", udc);
if (ret) {
udc->vbus_pin = -ENODEV;


@ -237,6 +237,8 @@ struct dummy_hcd {
struct usb_device *udev;
struct list_head urbp_list;
struct urbp *next_frame_urbp;
u32 stream_en_ep;
u8 num_stream[30 / 2];
@ -253,11 +255,13 @@ struct dummy {
*/
struct dummy_ep ep[DUMMY_ENDPOINTS];
int address;
int callback_usage;
struct usb_gadget gadget;
struct usb_gadget_driver *driver;
struct dummy_request fifo_req;
u8 fifo_buf[FIFO_SIZE];
u16 devstatus;
unsigned ints_enabled:1;
unsigned udc_suspended:1;
unsigned pullup:1;
@ -440,18 +444,27 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
(~dum_hcd->old_status) & dum_hcd->port_status;
/* Report reset and disconnect events to the driver */
if (dum->driver && (disconnect || reset)) {
if (dum->ints_enabled && (disconnect || reset)) {
stop_activity(dum);
++dum->callback_usage;
spin_unlock(&dum->lock);
if (reset)
usb_gadget_udc_reset(&dum->gadget, dum->driver);
else
dum->driver->disconnect(&dum->gadget);
spin_lock(&dum->lock);
--dum->callback_usage;
}
} else if (dum_hcd->active != dum_hcd->old_active) {
} else if (dum_hcd->active != dum_hcd->old_active &&
dum->ints_enabled) {
++dum->callback_usage;
spin_unlock(&dum->lock);
if (dum_hcd->old_active && dum->driver->suspend)
dum->driver->suspend(&dum->gadget);
else if (!dum_hcd->old_active && dum->driver->resume)
dum->driver->resume(&dum->gadget);
spin_lock(&dum->lock);
--dum->callback_usage;
}
dum_hcd->old_status = dum_hcd->port_status;
@ -967,8 +980,11 @@ static int dummy_udc_start(struct usb_gadget *g,
* can't enumerate without help from the driver we're binding.
*/
spin_lock_irq(&dum->lock);
dum->devstatus = 0;
dum->driver = driver;
dum->ints_enabled = 1;
spin_unlock_irq(&dum->lock);
return 0;
}
@ -979,6 +995,16 @@ static int dummy_udc_stop(struct usb_gadget *g)
struct dummy *dum = dum_hcd->dum;
spin_lock_irq(&dum->lock);
dum->ints_enabled = 0;
stop_activity(dum);
/* emulate synchronize_irq(): wait for callbacks to finish */
while (dum->callback_usage > 0) {
spin_unlock_irq(&dum->lock);
usleep_range(1000, 2000);
spin_lock_irq(&dum->lock);
}
dum->driver = NULL;
spin_unlock_irq(&dum->lock);
@ -1032,7 +1058,12 @@ static int dummy_udc_probe(struct platform_device *pdev)
memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
dum->gadget.name = gadget_name;
dum->gadget.ops = &dummy_ops;
dum->gadget.max_speed = USB_SPEED_SUPER;
if (mod_data.is_super_speed)
dum->gadget.max_speed = USB_SPEED_SUPER;
else if (mod_data.is_high_speed)
dum->gadget.max_speed = USB_SPEED_HIGH;
else
dum->gadget.max_speed = USB_SPEED_FULL;
dum->gadget.dev.parent = &pdev->dev;
init_dummy_udc_hw(dum);
@ -1241,6 +1272,8 @@ static int dummy_urb_enqueue(
list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list);
urb->hcpriv = urbp;
if (!dum_hcd->next_frame_urbp)
dum_hcd->next_frame_urbp = urbp;
if (usb_pipetype(urb->pipe) == PIPE_CONTROL)
urb->error_count = 1; /* mark as a new urb */
@ -1517,6 +1550,8 @@ static struct dummy_ep *find_endpoint(struct dummy *dum, u8 address)
if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ?
dum->ss_hcd : dum->hs_hcd)))
return NULL;
if (!dum->ints_enabled)
return NULL;
if ((address & ~USB_DIR_IN) == 0)
return &dum->ep[0];
for (i = 1; i < DUMMY_ENDPOINTS; i++) {
@ -1758,6 +1793,7 @@ static void dummy_timer(unsigned long _dum_hcd)
spin_unlock_irqrestore(&dum->lock, flags);
return;
}
dum_hcd->next_frame_urbp = NULL;
for (i = 0; i < DUMMY_ENDPOINTS; i++) {
if (!ep_info[i].name)
@ -1774,6 +1810,10 @@ restart:
int type;
int status = -EINPROGRESS;
/* stop when we reach URBs queued after the timer interrupt */
if (urbp == dum_hcd->next_frame_urbp)
break;
urb = urbp->urb;
if (urb->unlinked)
goto return_urb;
@ -1853,10 +1893,12 @@ restart:
* until setup() returns; no reentrancy issues etc.
*/
if (value > 0) {
++dum->callback_usage;
spin_unlock(&dum->lock);
value = dum->driver->setup(&dum->gadget,
&setup);
spin_lock(&dum->lock);
--dum->callback_usage;
if (value >= 0) {
/* no delays (max 64KB data stage) */
@ -2564,8 +2606,6 @@ static struct hc_driver dummy_hcd = {
.product_desc = "Dummy host controller",
.hcd_priv_size = sizeof(struct dummy_hcd),
.flags = HCD_USB3 | HCD_SHARED,
.reset = dummy_setup,
.start = dummy_start,
.stop = dummy_stop,
@ -2594,8 +2634,12 @@ static int dummy_hcd_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);
dum = *((void **)dev_get_platdata(&pdev->dev));
if (!mod_data.is_super_speed)
if (mod_data.is_super_speed)
dummy_hcd.flags = HCD_USB3 | HCD_SHARED;
else if (mod_data.is_high_speed)
dummy_hcd.flags = HCD_USB2;
else
dummy_hcd.flags = HCD_USB11;
hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev));
if (!hs_hcd)
return -ENOMEM;
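
The gadgetfs (udc_usage) and dummy-hcd (callback_usage) changes share one idea: bump a counter under the spinlock before dropping it around a driver callback, and have the teardown path release the lock, sleep briefly and re-check until the counter drains, roughly a synchronize_irq() where no real IRQ exists. A minimal pthread sketch of the pattern (hypothetical names, a mutex standing in for the spinlock):

/* Sketch of the "usage counter drained at teardown" pattern used by
 * the gadgetfs and dummy-hcd fixes above. */
#include <pthread.h>
#include <unistd.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int usage;		/* callbacks currently outside the lock */
static int unbound;		/* set once teardown has started */

static void run_callback(void (*cb)(void))
{
	pthread_mutex_lock(&lock);
	if (unbound) {			/* teardown already started */
		pthread_mutex_unlock(&lock);
		return;
	}
	usage++;			/* pin the device across the callback */
	pthread_mutex_unlock(&lock);

	cb();				/* may sleep, may take other locks */

	pthread_mutex_lock(&lock);
	usage--;
	pthread_mutex_unlock(&lock);
}

static void unbind(void)
{
	pthread_mutex_lock(&lock);
	unbound = 1;
	while (usage > 0) {		/* wait for in-flight callbacks */
		pthread_mutex_unlock(&lock);
		usleep(1000);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	/* now safe to free everything the callbacks might have used */
}

static void demo_cb(void) { puts("callback ran"); }

int main(void)
{
	run_callback(demo_cb);
	unbind();
	puts("unbound safely");
	return 0;
}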


@ -969,7 +969,7 @@ EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
*
* Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
* It signals to the BIOS that the OS wants control of the host controller,
* and then waits 5 seconds for the BIOS to hand over control.
* and then waits 1 second for the BIOS to hand over control.
* If we timeout, assume the BIOS is broken and take control anyway.
*/
static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
@ -1015,9 +1015,9 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
if (val & XHCI_HC_BIOS_OWNED) {
writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
/* Wait for 5 seconds with 10 microsecond polling interval */
/* Wait for 1 second with 10 microsecond polling interval */
timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
0, 5000, 10);
0, 1000000, 10);
/* Assume a buggy BIOS and take HC ownership anyway */
if (timeout) {
@ -1046,7 +1046,7 @@ hc_init:
* operational or runtime registers. Wait 5 seconds and no more.
*/
timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
5000, 10);
5000000, 10);
/* Assume a buggy HC and start HC initialization anyway */
if (timeout) {
val = readl(op_reg_base + XHCI_STS_OFFSET);
