This is the 4.4.204 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl3gA2oACgkQONu9yGCS
 aT7rfQ//c4X05XMCcC7uHpMX43BvzLYIRLMt81PrLuOIJWloyzKZQ6/24smCVqHS
 AER8+DVvVORLKMyXV5fEwPubXfeckAEqjTFUyI3vvAyxtQA4MYMW+a6b/GIyoZG0
 jjGBKYUGwSYsSD1nTmfiGkX8tbCQqYcQzRMk0N6drefluVo18Dxn59J+2Q4hBaRi
 /PQ2XKb9upW7Lq63rfnfgoBHgllI+Jkfl9MW8xuMnTZFda1a9xKqpNpxycQMLT5b
 wtSa8S30Tt10boQcJsj/yeG9vxiCHMNjpju3Z9DBSbAKdcZQI/DvKxh0cFk39pSi
 mvH3rW/CBEjR0+7hX46gu51mVIcIObiqz45BO5ln6KN0yC1s1EuDHYRxnyyoaC+i
 +kmxrAuO2i+S9aYtbODnHclUB7n6LxUrCmHwYtBLEwez1Cha6kH2hC2+SB08H7a8
 2PwTPbgvuwfuHloUNNC0svfCBwy/RQJRPf5NQacuZqHriAJOVuUwRFoBweWqozsS
 BVbrA1KQtR43/xjcKfQJVvnOQr923MkZ1r8qx1USXOhoZLhFXUe1yJW5gO88i3IT
 qOTRR/zisINt7Cw0KBzLiTD1sxxffkLjjg7+Mzoci6C6KHpLVXkf6BbFbD5u6XzG
 CvxzznMtyPqVyIepFi0+PT8q5+XGALSLzLo8gt3x5q+WP7h7JCY=
 =+alR
 -----END PGP SIGNATURE-----

Merge 4.4.204 into android-4.4-p

Changes in 4.4.204
	net/mlx4_en: fix mlx4 ethtool -N insertion
	sfc: Only cancel the PPS workqueue if it exists
	net/sched: act_pedit: fix WARN() in the traffic path
	net: rtnetlink: prevent underflows in do_setvfinfo()
	Revert "fs: ocfs2: fix possible null-pointer dereferences in ocfs2_xa_prepare_entry()"
	mm/ksm.c: don't WARN if page is still mapped in remove_stable_node()
	asus-wmi: Create quirk for airplane_mode LED
	asus-wmi: Add quirk_no_rfkill_wapf4 for the Asus X456UF
	asus-wmi: Add quirk_no_rfkill for the Asus N552VW
	asus-wmi: Add quirk_no_rfkill for the Asus U303LB
	asus-wmi: Add quirk_no_rfkill for the Asus Z550MA
	platform/x86: asus-wmi: Filter buggy scan codes on ASUS Q500A
	platform/x86: asus-wmi: fix asus ux303ub brightness issue
	platform/x86: asus-wmi: Set specified XUSB2PR value for X550LB
	asus-wmi: provide access to ALS control
	platform/x86: asus-wmi: try to set als by default
	platform/x86: asus-nb-wmi: Support ALS on the Zenbook UX430UQ
	platform/x86: asus-wmi: Only Tell EC the OS will handle display hotkeys from asus_nb_wmi
	platform/x86: asus-wmi: add SERIO_I8042 dependency
	mwifiex: Fix NL80211_TX_POWER_LIMITED
	ALSA: isight: fix leak of reference to firewire unit in error path of .probe callback
	printk: fix integer overflow in setup_log_buf()
	gfs2: Fix marking bitmaps non-full
	synclink_gt(): fix compat_ioctl()
	powerpc: Fix signedness bug in update_flash_db()
	powerpc/eeh: Fix use of EEH_PE_KEEP on wrong field
	brcmsmac: AP mode: update beacon when TIM changes
	spi: sh-msiof: fix deferred probing
	mmc: mediatek: fix cannot receive new request when msdc_cmd_is_ready fail
	btrfs: handle error of get_old_root
	gsmi: Fix bug in append_to_eventlog sysfs handler
	misc: mic: fix a DMA pool free failure
	amiflop: clean up on errors during setup
	scsi: ips: fix missing break in switch
	KVM/x86: Fix invvpid and invept register operand size in 64-bit mode
	scsi: isci: Use proper enumerated type in atapi_d2h_reg_frame_handler
	scsi: isci: Change sci_controller_start_task's return type to sci_status
	scsi: iscsi_tcp: Explicitly cast param in iscsi_sw_tcp_host_get_param
	clk: mmp2: fix the clock id for sdh2_clk and sdh3_clk
	scsi: dc395x: fix dma API usage in srb_done
	scsi: dc395x: fix DMA API usage in sg_update_list
	net: fix warning in af_unix
	kprobes, x86/ptrace.h: Make regs_get_kernel_stack_nth() not fault on bad stack
	ALSA: i2c/cs8427: Fix int to char conversion
	macintosh/windfarm_smu_sat: Fix debug output
	USB: misc: appledisplay: fix backlight update_status return code
	SUNRPC: Fix a compile warning for cmpxchg64()
	atm: zatm: Fix empty body Clang warnings
	s390/perf: Return error when debug_register fails
	spi: omap2-mcspi: Set FIFO DMA trigger level to word length
	sparc: Fix parport build warnings.
	ceph: fix dentry leak in ceph_readdir_prepopulate
	rtc: s35390a: Change buf's type to u8 in s35390a_init
	mISDN: Fix type of switch control variable in ctrl_teimanager
	qlcnic: fix a return in qlcnic_dcb_get_capability()
	mfd: mc13xxx-core: Fix PMIC shutdown when reading ADC values
	mfd: max8997: Enale irq-wakeup unconditionally
	selftests/ftrace: Fix to test kprobe $comm arg only if available
	thermal: rcar_thermal: Prevent hardware access during system suspend
	sparc64: Rework xchg() definition to avoid warnings.
	fs/ocfs2/dlm/dlmdebug.c: fix a sleep-in-atomic-context bug in dlm_print_one_mle()
	mm/page-writeback.c: fix range_cyclic writeback vs writepages deadlock
	um: Make line/tty semantics use true write IRQ
	linux/bitmap.h: handle constant zero-size bitmaps correctly
	linux/bitmap.h: fix type of nbits in bitmap_shift_right()
	hfsplus: fix BUG on bnode parent update
	hfs: fix BUG on bnode parent update
	hfsplus: prevent btree data loss on ENOSPC
	hfs: prevent btree data loss on ENOSPC
	hfsplus: fix return value of hfsplus_get_block()
	hfs: fix return value of hfs_get_block()
	fs/hfs/extent.c: fix array out of bounds read of array extent
	igb: shorten maximum PHC timecounter update interval
	ntb_netdev: fix sleep time mismatch
	ntb: intel: fix return value for ndev_vec_mask()
	ocfs2: don't put and assigning null to bh allocated outside
	ocfs2: fix clusters leak in ocfs2_defrag_extent()
	net: do not abort bulk send on BQL status
	sched/fair: Don't increase sd->balance_interval on newidle balance
	audit: print empty EXECVE args
	wlcore: Fix the return value in case of error in 'wlcore_vendor_cmd_smart_config_start()'
	rtl8xxxu: Fix missing break in switch
	brcmsmac: never log "tid x is not agg'able" by default
	wireless: airo: potential buffer overflow in sprintf()
	rtlwifi: rtl8192de: Fix misleading REG_MCUFWDL information
	scsi: mpt3sas: Fix Sync cache command failure during driver unload
	scsi: mpt3sas: Fix driver modifying persistent data in Manufacturing page11
	scsi: megaraid_sas: Fix msleep granularity
	scsi: lpfc: fcoe: Fix link down issue after 1000+ link bounces
	dlm: fix invalid free
	dlm: don't leak kernel pointer to userspace
	net: bcmgenet: return correct value 'ret' from bcmgenet_power_down
	sock: Reset dst when changing sk_mark via setsockopt
	pinctrl: qcom: spmi-gpio: fix gpio-hog related boot issues
	pinctrl: zynq: Use define directive for PIN_CONFIG_IO_STANDARD
	PCI: keystone: Use quirk to limit MRRS for K2G
	spi: omap2-mcspi: Fix DMA and FIFO event trigger size mismatch
	IB/hfi1: Ensure full Gen3 speed in a Gen4 system
	Bluetooth: Fix invalid-free in bcsp_close()
	ath9k_hw: fix uninitialized variable data
	dm: use blk_set_queue_dying() in __dm_destroy()
	arm64: fix for bad_mode() handler to always result in panic
	cpufreq: Skip cpufreq resume if it's not suspended
	ocfs2: remove ocfs2_is_o2cb_active()
	mmc: block: Fix tag condition with packed writes
	ARC: perf: Accommodate big-endian CPU
	x86/insn: Fix awk regexp warnings
	x86/speculation: Fix incorrect MDS/TAA mitigation status
	x86/speculation: Fix redundant MDS mitigation message
	media: vivid: Set vid_cap_streaming and vid_out_streaming to true
	media: vivid: Fix wrong locking that causes race conditions on streaming stop
	cpufreq: Add NULL checks to show() and store() methods of cpufreq
	media: b2c2-flexcop-usb: add sanity checking
	media: cxusb: detect cxusb_ctrl_msg error in query
	media: imon: invalid dereference in imon_touch_event
	virtio_console: reset on out of memory
	virtio_console: don't tie bufs to a vq
	virtio_console: allocate inbufs in add_port() only if it is needed
	virtio_console: fix uninitialized variable use
	virtio_console: drop custom control queue cleanup
	virtio_console: move removal code
	usb-serial: cp201x: support Mark-10 digital force gauge
	appledisplay: fix error handling in the scheduled work
	USB: serial: mos7840: add USB ID to support Moxa UPort 2210
	USB: serial: mos7720: fix remote wakeup
	USB: serial: mos7840: fix remote wakeup
	USB: serial: option: add support for DW5821e with eSIM support
	USB: serial: option: add support for Foxconn T77W968 LTE modules
	staging: comedi: usbduxfast: usbduxfast_ai_cmdtest rounding error
	powerpc/64s: support nospectre_v2 cmdline option
	powerpc/book3s64: Fix link stack flush on context switch
	KVM: PPC: Book3S HV: Flush link stack on guest exit to host kernel
	Linux 4.4.204

Change-Id: I63f64a109a8797f479bc7226be23ca591fa01b1c
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2019-11-28 18:42:19 +01:00
commit 583bdda5ea
136 changed files with 1173 additions and 500 deletions

@@ -262,8 +262,11 @@ time with the option "mds=". The valid arguments for this option are:
 ============ =============================================================
-Not specifying this option is equivalent to "mds=full".
+Not specifying this option is equivalent to "mds=full". For processors
+that are affected by both TAA (TSX Asynchronous Abort) and MDS,
+specifying just "mds=off" without an accompanying "tsx_async_abort=off"
+will have no effect as the same mitigation is used for both
+vulnerabilities.
 Mitigation selection guide
 --------------------------

@@ -169,7 +169,10 @@ the option "tsx_async_abort=". The valid arguments for this option are:
 systems will have no effect.
 ============ =============================================================
-Not specifying this option is equivalent to "tsx_async_abort=full".
+Not specifying this option is equivalent to "tsx_async_abort=full". For
+processors that are affected by both TAA and MDS, specifying just
+"tsx_async_abort=off" without an accompanying "mds=off" will have no
+effect as the same mitigation is used for both vulnerabilities.
 The kernel command line also allows to control the TSX feature using the
 parameter "tsx=" on CPUs which support TSX control. MSR_IA32_TSX_CTRL is used

@@ -2095,6 +2095,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 full - Enable MDS mitigation on vulnerable CPUs
 off - Unconditionally disable MDS mitigation
+On TAA-affected machines, mds=off can be prevented by
+an active TAA mitigation as both vulnerabilities are
+mitigated with the same mechanism so in order to disable
+this mitigation, you need to specify tsx_async_abort=off
+too.
 Not specifying this option is equivalent to
 mds=full.
@@ -4169,6 +4175,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 off - Unconditionally disable TAA mitigation
+On MDS-affected machines, tsx_async_abort=off can be
+prevented by an active MDS mitigation as both vulnerabilities
+are mitigated with the same mechanism so in order to disable
+this mitigation, you need to specify mds=off too.
 Not specifying this option is equivalent to
 tsx_async_abort=full. On CPUs which are MDS affected
 and deploy MDS mitigation, TAA mitigation is not

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 203
+SUBLEVEL = 204
 EXTRAVERSION =
 NAME = Blurry Fish Butt

@@ -486,8 +486,8 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 /* loop thru all available h/w condition indexes */
 for (j = 0; j < cc_bcr.c; j++) {
 write_aux_reg(ARC_REG_CC_INDEX, j);
-cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
-cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
+cc_name.indiv.word0 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME0));
+cc_name.indiv.word1 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME1));
 /* See if it has been mapped to a perf event_id */
 for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {

@@ -526,7 +526,6 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
 handler[reason], esr, esr_get_class_string(esr));
-die("Oops - bad mode", regs, 0);
 local_irq_disable();
 panic("bad mode");
 }

@@ -15,7 +15,10 @@
 /* Patch sites */
 extern s32 patch__call_flush_count_cache;
 extern s32 patch__flush_count_cache_return;
+extern s32 patch__flush_link_stack_return;
+extern s32 patch__call_kvm_flush_link_stack;
 extern long flush_count_cache;
+extern long kvm_flush_link_stack;
 #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */

@@ -81,6 +81,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
 // Software required to flush count cache on context switch
 #define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull
+// Software required to flush link stack on context switch
+#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull
 // Features enabled by default
 #define SEC_FTR_DEFAULT \

@@ -367,7 +367,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
 while (parent) {
 if (!(parent->type & EEH_PE_INVALID))
 break;
-parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP);
+parent->type &= ~EEH_PE_INVALID;
 parent = parent->parent;
 }

@@ -477,6 +477,7 @@ flush_count_cache:
 /* Save LR into r9 */
 mflr r9
+// Flush the link stack
 .rept 64
 bl .+4
 .endr
@@ -486,6 +487,11 @@
 .balign 32
 /* Restore LR */
 1: mtlr r9
+// If we're just flushing the link stack, return here
+3: nop
+patch_site 3b patch__flush_link_stack_return
 li r9,0x7fff
 mtctr r9

@@ -25,11 +25,12 @@ enum count_cache_flush_type {
 COUNT_CACHE_FLUSH_HW = 0x4,
 };
 static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+static bool link_stack_flush_enabled;
 bool barrier_nospec_enabled;
 static bool no_nospec;
 static bool btb_flush_enabled;
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
 static bool no_spectrev2;
 #endif
@@ -107,7 +108,7 @@ static __init int barrier_nospec_debugfs_init(void)
 device_initcall(barrier_nospec_debugfs_init);
 #endif /* CONFIG_DEBUG_FS */
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
 static int __init handle_nospectre_v2(char *p)
 {
 no_spectrev2 = true;
@@ -115,6 +116,9 @@ static int __init handle_nospectre_v2(char *p)
 return 0;
 }
 early_param("nospectre_v2", handle_nospectre_v2);
+#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */
+#ifdef CONFIG_PPC_FSL_BOOK3E
 void setup_spectre_v2(void)
 {
 if (no_spectrev2)
@@ -202,11 +206,19 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
 if (ccd)
 seq_buf_printf(&s, "Indirect branch cache disabled");
+if (link_stack_flush_enabled)
+seq_buf_printf(&s, ", Software link stack flush");
 } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
 seq_buf_printf(&s, "Mitigation: Software count cache flush");
 if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
 seq_buf_printf(&s, " (hardware accelerated)");
+if (link_stack_flush_enabled)
+seq_buf_printf(&s, ", Software link stack flush");
 } else if (btb_flush_enabled) {
 seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
 } else {
@@ -365,18 +377,49 @@ static __init int stf_barrier_debugfs_init(void)
 device_initcall(stf_barrier_debugfs_init);
 #endif /* CONFIG_DEBUG_FS */
+static void no_count_cache_flush(void)
+{
+count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+pr_info("count-cache-flush: software flush disabled.\n");
+}
 static void toggle_count_cache_flush(bool enable)
 {
-if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
+if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) &&
+!security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK))
+enable = false;
+if (!enable) {
 patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
-count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
-pr_info("count-cache-flush: software flush disabled.\n");
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP);
+#endif
+pr_info("link-stack-flush: software flush disabled.\n");
+link_stack_flush_enabled = false;
+no_count_cache_flush();
 return;
 }
+// This enables the branch from _switch to flush_count_cache
 patch_branch_site(&patch__call_flush_count_cache,
 (u64)&flush_count_cache, BRANCH_SET_LINK);
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+// This enables the branch from guest_exit_cont to kvm_flush_link_stack
+patch_branch_site(&patch__call_kvm_flush_link_stack,
+(u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
+#endif
+pr_info("link-stack-flush: software flush enabled.\n");
+link_stack_flush_enabled = true;
+// If we just need to flush the link stack, patch an early return
+if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
+patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR);
+no_count_cache_flush();
+return;
+}
 if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
 count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
 pr_info("count-cache-flush: full software flush sequence enabled.\n");
@@ -390,7 +433,26 @@ static void toggle_count_cache_flush(bool enable)
 void setup_count_cache_flush(void)
 {
-toggle_count_cache_flush(true);
+bool enable = true;
+if (no_spectrev2 || cpu_mitigations_off()) {
+if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
+security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
+pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");
+enable = false;
+}
+/*
+ * There's no firmware feature flag/hypervisor bit to tell us we need to
+ * flush the link stack on context switch. So we set it here if we see
+ * either of the Spectre v2 mitigations that aim to protect userspace.
+ */
+if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
+security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
+security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);
+toggle_count_cache_flush(enable);
 }
 #ifdef CONFIG_DEBUG_FS

@@ -18,6 +18,7 @@
 */
 #include <asm/ppc_asm.h>
+#include <asm/code-patching-asm.h>
 #include <asm/kvm_asm.h>
 #include <asm/reg.h>
 #include <asm/mmu.h>
@@ -1169,6 +1170,10 @@ mc_cont:
 bl kvmhv_accumulate_time
 #endif
+/* Possibly flush the link stack here. */
+1: nop
+patch_site 1b patch__call_kvm_flush_link_stack
 mr r3, r12
 /* Increment exit count, poke other threads to exit */
 bl kvmhv_commence_exit
@@ -1564,6 +1569,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 mtlr r0
 blr
+.balign 32
+.global kvm_flush_link_stack
+kvm_flush_link_stack:
+/* Save LR into r0 */
+mflr r0
+/* Flush the link stack. On Power8 it's up to 32 entries in size. */
+.rept 32
+bl .+4
+.endr
+/* Restore LR */
+mtlr r0
+blr
 /*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing

@@ -664,7 +664,7 @@ static int update_flash_db(void)
 db_set_64(db, &os_area_db_id_rtc_diff, saved_params.rtc_diff);
 count = os_area_flash_write(db, sizeof(struct os_area_db), pos);
-if (count < sizeof(struct os_area_db)) {
+if (count < 0 || count < sizeof(struct os_area_db)) {
 pr_debug("%s: os_area_flash_write failed %zd\n", __func__,
 count);
 error = count < 0 ? count : -EIO;

@@ -1616,14 +1616,17 @@ static int __init init_cpum_sampling_pmu(void)
 }
 sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
-if (!sfdbg)
+if (!sfdbg) {
 pr_err("Registering for s390dbf failed\n");
+return -ENOMEM;
+}
 debug_register_view(sfdbg, &debug_sprintf_view);
 err = register_external_irq(EXT_IRQ_MEASURE_ALERT,
 cpumf_measurement_alert);
 if (err) {
 pr_cpumsf_err(RS_INIT_FAILURE_ALRT);
+debug_unregister(sfdbg);
 goto out;
 }
@@ -1632,6 +1635,7 @@ static int __init init_cpum_sampling_pmu(void)
 pr_cpumsf_err(RS_INIT_FAILURE_PERF);
 unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
 cpumf_measurement_alert);
+debug_unregister(sfdbg);
 goto out;
 }
 perf_cpu_notifier(cpumf_pmu_notifier);

@@ -40,7 +40,12 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
 return val;
 }
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg(ptr,x) \
+({ __typeof__(*(ptr)) __ret; \
+__ret = (__typeof__(*(ptr))) \
+__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
+__ret; \
+})
 void __xchg_called_with_bad_pointer(void);

@@ -20,6 +20,7 @@
 */
 #define HAS_DMA
+#ifdef CONFIG_PARPORT_PC_FIFO
 static DEFINE_SPINLOCK(dma_spin_lock);
 #define claim_dma_lock() \
@@ -30,6 +31,7 @@ static DEFINE_SPINLOCK(dma_spin_lock);
 #define release_dma_lock(__flags) \
 spin_unlock_irqrestore(&dma_spin_lock, __flags);
+#endif
 static struct sparc_ebus_info {
 struct ebus_dma_info info;

@@ -260,7 +260,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data)
 if (err == 0) {
 spin_unlock(&line->lock);
 return IRQ_NONE;
-} else if (err < 0) {
+} else if ((err < 0) && (err != -EAGAIN)) {
 line->head = line->buffer;
 line->tail = line->buffer;
 }

@@ -205,24 +205,52 @@ static inline int regs_within_kernel_stack(struct pt_regs *regs,
 (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
 }
+/**
+ * regs_get_kernel_stack_nth_addr() - get the address of the Nth entry on stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the address of the @n th entry of the
+ * kernel stack which is specified by @regs. If the @n th entry is NOT in
+ * the kernel stack, this returns NULL.
+ */
+static inline unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs, unsigned int n)
+{
+unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+addr += n;
+if (regs_within_kernel_stack(regs, (unsigned long)addr))
+return addr;
+else
+return NULL;
+}
+/* To avoid include hell, we can't include uaccess.h */
+extern long probe_kernel_read(void *dst, const void *src, size_t size);
 /**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
-* is specified by @regs. If the @n th entry is NOT in the kernel stack,
+* is specified by @regs. If the @n th entry is NOT in the kernel stack
 * this returns 0.
 */
 static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
 unsigned int n)
 {
-unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
-addr += n;
-if (regs_within_kernel_stack(regs, (unsigned long)addr))
-return *addr;
-else
-return 0;
+unsigned long *addr;
+unsigned long val;
+long ret;
+addr = regs_get_kernel_stack_nth_addr(regs, n);
+if (addr) {
+ret = probe_kernel_read(&val, addr, sizeof(val));
+if (!ret)
+return val;
+}
+return 0;
 }
 #define arch_has_single_step() (1)

@@ -37,6 +37,7 @@ static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
 static void __init mds_select_mitigation(void);
+static void __init mds_print_mitigation(void);
 static void __init taa_select_mitigation(void);
 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
@@ -99,6 +100,12 @@ void __init check_bugs(void)
 mds_select_mitigation();
 taa_select_mitigation();
+/*
+ * As MDS and TAA mitigations are inter-related, print MDS
+ * mitigation until after TAA mitigation selection is done.
+ */
+mds_print_mitigation();
 arch_smt_update();
 #ifdef CONFIG_X86_32
@@ -224,6 +231,12 @@ static void __init mds_select_mitigation(void)
 mds_mitigation = MDS_MITIGATION_OFF;
 return;
 }
+}
+static void __init mds_print_mitigation(void)
+{
+if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
+return;
 if (mds_mitigation == MDS_MITIGATION_FULL) {
 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
@@ -283,8 +296,12 @@ static void __init taa_select_mitigation(void)
 return;
 }
-/* TAA mitigation is turned off on the cmdline (tsx_async_abort=off) */
-if (taa_mitigation == TAA_MITIGATION_OFF)
+/*
+ * TAA mitigation via VERW is turned off if both
+ * tsx_async_abort=off and mds=off are specified.
+ */
+if (taa_mitigation == TAA_MITIGATION_OFF &&
+mds_mitigation == MDS_MITIGATION_OFF)
 goto out;
 if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
@@ -315,6 +332,15 @@
 */
 static_branch_enable(&mds_user_clear);
+/*
+ * Update MDS mitigation, if necessary, as the mds_user_clear is
+ * now enabled for TAA mitigation.
+ */
+if (mds_mitigation == MDS_MITIGATION_OFF &&
+boot_cpu_has_bug(X86_BUG_MDS)) {
+mds_mitigation = MDS_MITIGATION_FULL;
+mds_select_mitigation();
+}
 out:
 pr_info("%s\n", taa_strings[taa_mitigation]);
 }

@@ -1298,7 +1298,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 return -1;
 }
-static inline void __invvpid(int ext, u16 vpid, gva_t gva)
+static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
 {
 struct {
 u64 vpid : 16;
@@ -1312,7 +1312,7 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva)
 : : "a"(&operand), "c"(ext) : "cc", "memory");
 }
-static inline void __invept(int ext, u64 eptp, gpa_t gpa)
+static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
 {
 struct {
 u64 eptp, gpa;

@@ -68,7 +68,7 @@ BEGIN {
 lprefix1_expr = "\\((66|!F3)\\)"
 lprefix2_expr = "\\(F3\\)"
-lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
+lprefix3_expr = "\\((F2|!F3|66&F2)\\)"
 lprefix_expr = "\\((66|F2|F3)\\)"
 max_lprefix = 4
@@ -253,7 +253,7 @@ function convert_operands(count,opnd, i,j,imm,mod)
 return add_flags(imm, mod)
 }
-/^[0-9a-f]+\:/ {
+/^[0-9a-f]+:/ {
 if (NR == 1)
 next
 # get index

@@ -126,7 +126,7 @@ static unsigned long dummy[2] = {0,0};
 #define zin_n(r) inl(zatm_dev->base+r*4)
 #define zin(r) inl(zatm_dev->base+uPD98401_##r*4)
 #define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4)
-#define zwait while (zin(CMR) & uPD98401_BUSY)
+#define zwait() do {} while (zin(CMR) & uPD98401_BUSY)
 /* RX0, RX1, TX0, TX1 */
 static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 };
@@ -140,7 +140,7 @@ static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */
 static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr)
 {
-zwait;
+zwait();
 zout(value,CER);
 zout(uPD98401_IND_ACC | uPD98401_IA_BALL |
 (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
@@ -149,10 +149,10 @@ static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr)
 static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr)
 {
-zwait;
+zwait();
 zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW |
 (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
-zwait;
+zwait();
 return zin(CER);
 }
@@ -241,7 +241,7 @@ static void refill_pool(struct atm_dev *dev,int pool)
 }
 if (first) {
 spin_lock_irqsave(&zatm_dev->lock, flags);
-zwait;
+zwait();
 zout(virt_to_bus(first),CER);
 zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count,
 CMR);
@@ -508,9 +508,9 @@ static int open_rx_first(struct atm_vcc *vcc)
 }
 if (zatm_vcc->pool < 0) return -EMSGSIZE;
 spin_lock_irqsave(&zatm_dev->lock, flags);
-zwait;
+zwait();
 zout(uPD98401_OPEN_CHAN,CMR);
-zwait;
+zwait();
 DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
 chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
 spin_unlock_irqrestore(&zatm_dev->lock, flags);
@@ -571,21 +571,21 @@ static void close_rx(struct atm_vcc *vcc)
 pos = vcc->vci >> 1;
 shift = (1-(vcc->vci & 1)) << 4;
 zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos);
-zwait;
+zwait();
 zout(uPD98401_NOP,CMR);
-zwait;
+zwait();
 zout(uPD98401_NOP,CMR);
 spin_unlock_irqrestore(&zatm_dev->lock, flags);
 }
 spin_lock_irqsave(&zatm_dev->lock, flags);
-zwait;
+zwait();
 zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
 uPD98401_CHAN_ADDR_SHIFT),CMR);
-zwait;
+zwait();
 udelay(10); /* why oh why ... ? */
 zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
 uPD98401_CHAN_ADDR_SHIFT),CMR);
-zwait;
+zwait();
 if (!(zin(CMR) & uPD98401_CHAN_ADDR))
 printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel "
 "%d\n",vcc->dev->number,zatm_vcc->rx_chan);
@@ -698,7 +698,7 @@ printk("NONONONOO!!!!\n");
 skb_queue_tail(&zatm_vcc->tx_queue,skb);
 DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+
 uPD98401_TXVC_QRP));
-zwait;
+zwait();
 zout(uPD98401_TX_READY | (zatm_vcc->tx_chan <<
 uPD98401_CHAN_ADDR_SHIFT),CMR);
 spin_unlock_irqrestore(&zatm_dev->lock, flags);
@@ -890,12 +890,12 @@ static void close_tx(struct atm_vcc *vcc)
 }
 spin_lock_irqsave(&zatm_dev->lock, flags);
 #if 0
-zwait;
+zwait();
 zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
 #endif
-zwait;
+zwait();
 zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
-zwait;
+zwait();
 if (!(zin(CMR) & uPD98401_CHAN_ADDR))
 printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel "
 "%d\n",vcc->dev->number,chan);
@@ -925,9 +925,9 @@ static int open_tx_first(struct atm_vcc *vcc)
 zatm_vcc->tx_chan = 0;
 if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
 spin_lock_irqsave(&zatm_dev->lock, flags);
-zwait;
+zwait();
 zout(uPD98401_OPEN_CHAN,CMR);
-zwait;
+zwait();
 DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
 chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
 spin_unlock_irqrestore(&zatm_dev->lock, flags);
@@ -1557,7 +1557,7 @@ static void zatm_phy_put(struct atm_dev *dev,unsigned char value,
 struct zatm_dev *zatm_dev;
 zatm_dev = ZATM_DEV(dev);
-zwait;
+zwait();
 zout(value,CER);
 zout(uPD98401_IND_ACC | uPD98401_IA_B0 |
 (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
@@ -1569,10 +1569,10 @@ static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr)
 struct zatm_dev *zatm_dev;
 zatm_dev = ZATM_DEV(dev);
-zwait;
+zwait();
 zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW |
 (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
-zwait;
+zwait();
 return zin(CER) & 0xff;
 }

@@ -1699,11 +1699,41 @@ static const struct block_device_operations floppy_fops = {
 .check_events = amiga_check_events,
 };
+static struct gendisk *fd_alloc_disk(int drive)
+{
+struct gendisk *disk;
+disk = alloc_disk(1);
+if (!disk)
+goto out;
+disk->queue = blk_init_queue(do_fd_request, &amiflop_lock);
+if (IS_ERR(disk->queue)) {
+disk->queue = NULL;
+goto out_put_disk;
+}
+unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL);
+if (!unit[drive].trackbuf)
+goto out_cleanup_queue;
+return disk;
+out_cleanup_queue:
+blk_cleanup_queue(disk->queue);
+disk->queue = NULL;
+out_put_disk:
+put_disk(disk);
+out:
+unit[drive].type->code = FD_NODRIVE;
+return NULL;
+}
 static int __init fd_probe_drives(void)
 {
 int drive,drives,nomem;
-printk(KERN_INFO "FD: probing units\nfound ");
+pr_info("FD: probing units\nfound");
 drives=0;
 nomem=0;
 for(drive=0;drive<FD_MAX_UNITS;drive++) {
@@ -1711,27 +1741,17 @@ static int __init fd_probe_drives(void)
 fd_probe(drive);
 if (unit[drive].type->code == FD_NODRIVE)
 continue;
-disk = alloc_disk(1);
+disk = fd_alloc_disk(drive);
 if (!disk) {
-unit[drive].type->code = FD_NODRIVE;
+pr_cont(" no mem for fd%d", drive);
+nomem = 1;
 continue;
 }
 unit[drive].gendisk = disk;
-disk->queue = blk_init_queue(do_fd_request, &amiflop_lock);
-if (!disk->queue) {
-unit[drive].type->code = FD_NODRIVE;
-continue;
-}
 drives++;
-if ((unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL)) == NULL) {
-printk("no mem for ");
-unit[drive].type = &drive_types[num_dr_types - 1]; /* FD_NODRIVE */
-drives--;
-nomem = 1;
-}
-printk("fd%d ",drive);
+pr_cont(" fd%d",drive);
 disk->major = FLOPPY_MAJOR;
 disk->first_minor = drive;
 disk->fops = &floppy_fops;
@@ -1742,11 +1762,11 @@ static int __init fd_probe_drives(void)
 }
 if ((drives > 0) || (nomem == 0)) {
 if (drives == 0)
-printk("no drives");
-printk("\n");
+pr_cont(" no drives");
+pr_cont("\n");
 return drives;
 }
-printk("\n");
+pr_cont("\n");
 return -ENOMEM;
 }
@@ -1837,30 +1857,6 @@ out_blkdev:
 return ret;
 }
-#if 0 /* not safe to unload */
-static int __exit amiga_floppy_remove(struct platform_device *pdev)
-{
-int i;
-for( i = 0; i < FD_MAX_UNITS; i++) {
-if (unit[i].type->code != FD_NODRIVE) {
-struct request_queue *q = unit[i].gendisk->queue;
-del_gendisk(unit[i].gendisk);
-put_disk(unit[i].gendisk);
-kfree(unit[i].trackbuf);
-if (q)
-blk_cleanup_queue(q);
-}
-}
-blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
-free_irq(IRQ_AMIGA_CIAA_TB, NULL);
-free_irq(IRQ_AMIGA_DSKBLK, NULL);
-custom.dmacon = DMAF_DISK; /* disable DMA */
-amiga_chip_free(raw_buf);
-unregister_blkdev(FLOPPY_MAJOR, "fd");
-}
-#endif
 static struct platform_driver amiga_floppy_driver = {
 .driver = {
 .name = "amiga-floppy",

@@ -566,6 +566,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
 if (*ptr == 0xc0) {
 BT_ERR("Short BCSP packet");
 kfree_skb(bcsp->rx_skb);
+bcsp->rx_skb = NULL;
 bcsp->rx_state = BCSP_W4_PKT_START;
 bcsp->rx_count = 0;
 } else
@@ -581,6 +582,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
 bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) {
 BT_ERR("Error in BCSP hdr checksum");
 kfree_skb(bcsp->rx_skb);
+bcsp->rx_skb = NULL;
 bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
 bcsp->rx_count = 0;
 continue;
@@ -615,6 +617,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
 bscp_get_crc(bcsp));
 kfree_skb(bcsp->rx_skb);
+bcsp->rx_skb = NULL;
 bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
 bcsp->rx_count = 0;
 continue;

@@ -417,7 +417,7 @@ static void reclaim_dma_bufs(void)
 }
 }
-static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
+static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size,
 int pages)
 {
 struct port_buffer *buf;
@@ -440,7 +440,7 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
 return buf;
 }
-if (is_rproc_serial(vq->vdev)) {
+if (is_rproc_serial(vdev)) {
 /*
 * Allocate DMA memory from ancestor. When a virtio
 * device is created by remoteproc, the DMA memory is
@@ -450,9 +450,9 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
 * DMA_MEMORY_INCLUDES_CHILDREN had been supported
 * in dma-coherent.c
 */
-if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
+if (!vdev->dev.parent || !vdev->dev.parent->parent)
 goto free_buf;
-buf->dev = vq->vdev->dev.parent->parent;
+buf->dev = vdev->dev.parent->parent;
 /* Increase device refcnt to avoid freeing it */
 get_device(buf->dev);
@@ -835,7 +835,7 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
 count = min((size_t)(32 * 1024), count);
-buf = alloc_buf(port->out_vq, count, 0);
+buf = alloc_buf(port->portdev->vdev, count, 0);
 if (!buf)
 return -ENOMEM;
@@ -954,7 +954,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
 if (ret < 0)
 goto error_out;
-buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
+buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs);
 if (!buf) {
 ret = -ENOMEM;
 goto error_out;
@@ -1363,24 +1363,24 @@ static void set_console_size(struct port *port, u16 rows, u16 cols)
 port->cons.ws.ws_col = cols;
 }
-static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+static int fill_queue(struct virtqueue *vq, spinlock_t *lock)
 {
 struct port_buffer *buf;
-unsigned int nr_added_bufs;
+int nr_added_bufs;
 int ret;
 nr_added_bufs = 0;
 do {
-buf = alloc_buf(vq, PAGE_SIZE, 0);
+buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
 if (!buf)
-break;
+return -ENOMEM;
 spin_lock_irq(lock);
 ret = add_inbuf(vq, buf);
 if (ret < 0) {
 spin_unlock_irq(lock);
 free_buf(buf, true);
-break;
+return ret;
 }
 nr_added_bufs++;
 spin_unlock_irq(lock);
@@ -1400,7 +1400,6 @@ static int add_port(struct ports_device *portdev, u32 id)
 char debugfs_name[16];
 struct port *port;
 dev_t devt;
-unsigned int nr_added_bufs;
 int err;
 port = kmalloc(sizeof(*port), GFP_KERNEL);
@@ -1459,11 +1458,13 @@ static int add_port(struct ports_device *portdev, u32 id)
 spin_lock_init(&port->outvq_lock);
 init_waitqueue_head(&port->waitqueue);
-/* Fill the in_vq with buffers so the host can send us data. */
-nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
-if (!nr_added_bufs) {
+/* We can safely ignore ENOSPC because it means
+ * the queue already has buffers. Buffers are removed
+ * only by virtcons_remove(), not by unplug_port()
+ */
+err = fill_queue(port->in_vq, &port->inbuf_lock);
+if (err < 0 && err != -ENOSPC) {
 dev_err(port->dev, "Error allocating inbufs\n");
-err = -ENOMEM;
 goto free_device;
 }
@@ -1986,19 +1987,40 @@ static void remove_vqs(struct ports_device *portdev)
 kfree(portdev->out_vqs);
 }
-static void remove_controlq_data(struct ports_device *portdev)
+static void virtcons_remove(struct virtio_device *vdev)
 {
-struct port_buffer *buf;
-unsigned int len;
-if (!use_multiport(portdev))
-return;
-while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
-free_buf(buf, true);
-while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
-free_buf(buf, true);
+struct ports_device *portdev;
+struct port *port, *port2;
+portdev = vdev->priv;
+spin_lock_irq(&pdrvdata_lock);
+list_del(&portdev->list);
+spin_unlock_irq(&pdrvdata_lock);
+/* Disable interrupts for vqs */
+vdev->config->reset(vdev);
+/* Finish up work that's lined up */
+if (use_multiport(portdev))
+cancel_work_sync(&portdev->control_work);
+else
+cancel_work_sync(&portdev->config_work);
+list_for_each_entry_safe(port, port2, &portdev->ports, list)
+unplug_port(port);
+unregister_chrdev(portdev->chr_major, "virtio-portsdev");
+/*
+ * When yanking out a device, we immediately lose the
+ * (device-side) queues. So there's no point in keeping the
+ * guest side around till we drop our final reference. This
+ * also means that any ports which are in an open state will
+ * have to just stop using the port, as the vqs are going
+ * away.
+ */
+remove_vqs(portdev);
+kfree(portdev);
 }
@@ -2067,6 +2089,7 @@ static int virtcons_probe(struct virtio_device *vdev)
 spin_lock_init(&portdev->ports_lock);
 INIT_LIST_HEAD(&portdev->ports);
+INIT_LIST_HEAD(&portdev->list);
 virtio_device_ready(portdev->vdev);
@@ -2074,18 +2097,22 @@ static int virtcons_probe(struct virtio_device *vdev)
 INIT_WORK(&portdev->control_work, &control_work_handler);
 if (multiport) {
-unsigned int nr_added_bufs;
 spin_lock_init(&portdev->c_ivq_lock);
 spin_lock_init(&portdev->c_ovq_lock);
-nr_added_bufs = fill_queue(portdev->c_ivq,
-&portdev->c_ivq_lock);
-if (!nr_added_bufs) {
+err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
+if (err < 0) {
 dev_err(&vdev->dev,
 "Error allocating buffers for control queue\n");
-err = -ENOMEM;
-goto free_vqs;
+/*
+ * The host might want to notify mgmt sw about device
+ * add failure.
+ */
+__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+VIRTIO_CONSOLE_DEVICE_READY, 0);
+/* Device was functional: we need full cleanup. */
+virtcons_remove(vdev);
+return err;
 }
 } else {
 /*
@@ -2116,11 +2143,6 @@ static int virtcons_probe(struct virtio_device *vdev)
 return 0;
-free_vqs:
-/* The host might want to notify mgmt sw about device add failure */
-__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
-VIRTIO_CONSOLE_DEVICE_READY, 0);
-remove_vqs(portdev);
 free_chrdev:
 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
 free:
@@ -2129,43 +2151,6 @@ fail:
 return err;
 }
-static void virtcons_remove(struct virtio_device *vdev)
-{
-struct ports_device *portdev;
-struct port *port, *port2;
-portdev = vdev->priv;
-spin_lock_irq(&pdrvdata_lock);
-list_del(&portdev->list);
-spin_unlock_irq(&pdrvdata_lock);
-/* Disable interrupts for vqs */
-vdev->config->reset(vdev);
-/* Finish up work that's lined up */
-if (use_multiport(portdev))
-cancel_work_sync(&portdev->control_work);
-else
-cancel_work_sync(&portdev->config_work);
-list_for_each_entry_safe(port, port2, &portdev->ports, list)
-unplug_port(port);
-unregister_chrdev(portdev->chr_major, "virtio-portsdev");
-/*
- * When yanking out a device, we immediately lose the
- * (device-side) queues. So there's no point in keeping the
- * guest side around till we drop our final reference. This
- * also means that any ports which are in an open state will
- * have to just stop using the port, as the vqs are going
- * away.
- */
-remove_controlq_data(portdev);
-remove_vqs(portdev);
-kfree(portdev);
-}
 static struct virtio_device_id id_table[] = {
 { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
 { 0 },
@@ -2196,15 +2181,16 @@ static int virtcons_freeze(struct virtio_device *vdev)
 vdev->config->reset(vdev);
-virtqueue_disable_cb(portdev->c_ivq);
+if (use_multiport(portdev))
+virtqueue_disable_cb(portdev->c_ivq);
 cancel_work_sync(&portdev->control_work);
 cancel_work_sync(&portdev->config_work);
 /*
 * Once more: if control_work_handler() was running, it would
 * enable the cb as the last step.
 */
-virtqueue_disable_cb(portdev->c_ivq);
-remove_controlq_data(portdev);
+if (use_multiport(portdev))
+virtqueue_disable_cb(portdev->c_ivq);
 list_for_each_entry(port, &portdev->ports, list) {
 virtqueue_disable_cb(port->in_vq);
View file

@ -227,8 +227,8 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
/* The gate clocks has mux parent. */ /* The gate clocks has mux parent. */
{MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, {MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH1, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, {MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH1, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, {MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock}, {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
{MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock}, {MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock},
{MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock}, {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock},

View file

@ -917,6 +917,9 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
struct freq_attr *fattr = to_attr(attr); struct freq_attr *fattr = to_attr(attr);
ssize_t ret; ssize_t ret;
if (!fattr->show)
return -EIO;
down_read(&policy->rwsem); down_read(&policy->rwsem);
if (fattr->show) if (fattr->show)
@ -936,6 +939,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
struct freq_attr *fattr = to_attr(attr); struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL; ssize_t ret = -EINVAL;
if (!fattr->store)
return -EIO;
get_online_cpus(); get_online_cpus();
if (!cpu_online(policy->cpu)) if (!cpu_online(policy->cpu))
@ -1724,6 +1730,9 @@ void cpufreq_resume(void)
if (!cpufreq_driver) if (!cpufreq_driver)
return; return;
if (unlikely(!cpufreq_suspended))
return;
cpufreq_suspended = false; cpufreq_suspended = false;
if (!has_target()) if (!has_target())

@@ -480,11 +480,10 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
 if (count < sizeof(u32))
 return -EINVAL;
 param.type = *(u32 *)buf;
-count -= sizeof(u32);
 buf += sizeof(u32);
 /* The remaining buffer is the data payload */
-if (count > gsmi_dev.data_buf->length)
+if ((count - sizeof(u32)) > gsmi_dev.data_buf->length)
 return -EINVAL;
 param.data_len = count - sizeof(u32);
@@ -504,7 +503,7 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
 spin_unlock_irqrestore(&gsmi_dev.lock, flags);
-return rc;
+return (rc == 0) ? count : rc;
 }

@@ -1180,8 +1180,7 @@ static int
 ctrl_teimanager(struct manager *mgr, void *arg)
 {
 /* currently we only have one option */
-int *val = (int *)arg;
-int ret = 0;
+unsigned int *val = (unsigned int *)arg;
 switch (val[0]) {
 case IMCLEAR_L2:
@@ -1197,9 +1196,9 @@ ctrl_teimanager(struct manager *mgr, void *arg)
 test_and_clear_bit(OPTION_L1_HOLD, &mgr->options);
 break;
 default:
-ret = -EINVAL;
+return -EINVAL;
 }
-return ret;
+return 0;
 }
 /* This function does create a L2 for fixed TEI in NT Mode */

@@ -22,14 +22,6 @@
 #define VERSION "1.0"
-#define DEBUG
-#ifdef DEBUG
-#define DBG(args...) printk(args)
-#else
-#define DBG(args...) do { } while(0)
-#endif
 /* If the cache is older than 800ms we'll refetch it */
 #define MAX_AGE msecs_to_jiffies(800)
@@ -106,13 +98,10 @@ struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id,
 buf[i+2] = data[3];
 buf[i+3] = data[2];
 }
-#ifdef DEBUG
-DBG(KERN_DEBUG "sat %d partition %x:", sat_id, id);
-for (i = 0; i < len; ++i)
-DBG(" %x", buf[i]);
-DBG("\n");
-#endif
+printk(KERN_DEBUG "sat %d partition %x:", sat_id, id);
+print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET,
+16, 1, buf, len, false);
 if (size)
 *size = len;
 return (struct smu_sdbp_header *) buf;
@@ -132,13 +121,13 @@ static int wf_sat_read_cache(struct wf_sat *sat)
 if (err < 0)
 return err;
 sat->last_read = jiffies;
 #ifdef LOTSA_DEBUG
 {
 int i;
-DBG(KERN_DEBUG "wf_sat_get: data is");
-for (i = 0; i < 16; ++i)
-DBG(" %.2x", sat->cache[i]);
-DBG("\n");
+printk(KERN_DEBUG "wf_sat_get: data is");
+print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET,
+16, 1, sat->cache, 16, false);
 }
 #endif
 return 0;

@@ -2939,9 +2939,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 set_bit(DMF_FREEING, &md->flags);
 spin_unlock(&_minor_lock);
-spin_lock_irq(q->queue_lock);
-queue_flag_set(QUEUE_FLAG_DYING, q);
-spin_unlock_irq(q->queue_lock);
+blk_set_queue_dying(q);
 if (dm_request_based(md) && md->kworker_task)
 flush_kthread_worker(&md->kworker);

@@ -763,7 +763,11 @@ static int vivid_thread_vid_cap(void *data)
 if (kthread_should_stop())
 break;
-mutex_lock(&dev->mutex);
+if (!mutex_trylock(&dev->mutex)) {
+schedule_timeout_uninterruptible(1);
+continue;
+}
 cur_jiffies = jiffies;
 if (dev->cap_seq_resync) {
 dev->jiffies_vid_cap = cur_jiffies;
@@ -916,8 +920,6 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
 /* shutdown control thread */
 vivid_grab_controls(dev, false);
-mutex_unlock(&dev->mutex);
 kthread_stop(dev->kthread_vid_cap);
 dev->kthread_vid_cap = NULL;
-mutex_lock(&dev->mutex);
 }

@@ -147,7 +147,11 @@ static int vivid_thread_vid_out(void *data)
 if (kthread_should_stop())
 break;
-mutex_lock(&dev->mutex);
+if (!mutex_trylock(&dev->mutex)) {
+schedule_timeout_uninterruptible(1);
+continue;
+}
 cur_jiffies = jiffies;
 if (dev->out_seq_resync) {
 dev->jiffies_vid_out = cur_jiffies;
@@ -301,8 +305,6 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
 /* shutdown control thread */
 vivid_grab_controls(dev, false);
-mutex_unlock(&dev->mutex);
 kthread_stop(dev->kthread_vid_out);
 dev->kthread_vid_out = NULL;
-mutex_lock(&dev->mutex);
 }

@@ -151,7 +151,11 @@ static int vivid_thread_sdr_cap(void *data)
 if (kthread_should_stop())
 break;
-mutex_lock(&dev->mutex);
+if (!mutex_trylock(&dev->mutex)) {
+schedule_timeout_uninterruptible(1);
+continue;
+}
 cur_jiffies = jiffies;
 if (dev->sdr_cap_seq_resync) {
 dev->jiffies_sdr_cap = cur_jiffies;
@@ -311,10 +315,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq)
 }
 /* shutdown control thread */
-mutex_unlock(&dev->mutex);
 kthread_stop(dev->kthread_sdr_cap);
 dev->kthread_sdr_cap = NULL;
-mutex_lock(&dev->mutex);
 }
 const struct vb2_ops vivid_sdr_cap_qops = {

@@ -253,9 +253,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
 if (vb2_is_streaming(&dev->vb_vid_out_q))
 dev->can_loop_video = vivid_vid_can_loop(dev);
-if (dev->kthread_vid_cap)
-return 0;
 dev->vid_cap_seq_count = 0;
 dprintk(dev, 1, "%s\n", __func__);
 for (i = 0; i < VIDEO_MAX_FRAME; i++)

@@ -173,9 +173,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
 if (vb2_is_streaming(&dev->vb_vid_cap_q))
 dev->can_loop_video = vivid_vid_can_loop(dev);
-if (dev->kthread_vid_out)
-return 0;
 dev->vid_out_seq_count = 0;
 dprintk(dev, 1, "%s\n", __func__);
 if (dev->start_streaming_error) {

@@ -1644,8 +1644,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
 spin_unlock_irqrestore(&ictx->kc_lock, flags);
 /* send touchscreen events through input subsystem if touchpad data */
-if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 &&
-buf[7] == 0x86) {
+if (ictx->touch && len == 8 && buf[7] == 0x86) {
 imon_touch_event(ictx, buf);
 return;

@@ -508,6 +508,9 @@ static int flexcop_usb_probe(struct usb_interface *intf,
 struct flexcop_device *fc = NULL;
 int ret;
+if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+return -ENODEV;
 if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) {
 err("out of memory\n");
 return -ENOMEM;

@@ -435,7 +435,8 @@ static int cxusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
 u8 ircode[4];
 int i;
-cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4);
+if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4) < 0)
+return 0;
 *event = 0;
 *state = REMOTE_NO_KEY_PRESSED;

@@ -156,12 +156,6 @@ static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
 pd->ono = irq_of_parse_and_map(dev->of_node, 1);
-/*
-* ToDo: the 'wakeup' member in the platform data is more of a linux
-* specfic information. Hence, there is no binding for that yet and
-* not parsed here.
-*/
 return pd;
 }
@@ -249,7 +243,7 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
 */
 /* MAX8997 has a power button input. */
-device_init_wakeup(max8997->dev, pdata->wakeup);
+device_init_wakeup(max8997->dev, true);
 return ret;

@@ -278,7 +278,8 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
 if (ret)
 goto out;
-adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2;
+adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 |
+MC13XXX_ADC0_CHRGRAWDIV;
 adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC;
 if (channel > 7)

@@ -271,7 +271,7 @@ static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
 dma_fail:
 if (!x100)
 dma_pool_free(ep->remote_dev->signal_pool, status,
-status->src_dma_addr);
+src - offsetof(struct scif_status, val));
 alloc_fail:
 return err;
 }

@@ -2041,8 +2041,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
 (prq->cmd_flags & REQ_META) &&
 (rq_data_dir(prq) == WRITE) &&
-((brq->data.blocks * brq->data.blksz) >=
-card->ext_csd.data_tag_unit_size);
+blk_rq_bytes(prq) >= card->ext_csd.data_tag_unit_size;
 /* Argument of CMD23 */
 packed_cmd_hdr[(i * 2)] = cpu_to_le32(
 (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |

@@ -846,6 +846,7 @@ static void msdc_start_command(struct msdc_host *host,
 WARN_ON(host->cmd);
 host->cmd = cmd;
+mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
 if (!msdc_cmd_is_ready(host, mrq, cmd))
 return;
@@ -857,7 +858,6 @@ static void msdc_start_command(struct msdc_host *host,
 cmd->error = 0;
 rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd);
-mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
 sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask);
 writel(cmd->arg, host->base + SDC_ARG);

@@ -1074,7 +1074,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
 break;
 }
-return 0;
+return ret;
 }
 static void bcmgenet_power_up(struct bcmgenet_priv *priv,

@@ -65,9 +65,15 @@
 *
 * The 40 bit 82580 SYSTIM overflows every
 * 2^40 * 10^-9 / 60 = 18.3 minutes.
+*
+* SYSTIM is converted to real time using a timecounter. As
+* timecounter_cyc2time() allows old timestamps, the timecounter
+* needs to be updated at least once per half of the SYSTIM interval.
+* Scheduling of delayed work is not very accurate, so we aim for 8
+* minutes to be sure the actual interval is shorter than 9.16 minutes.
 */
-#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
+#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 8)
 #define IGB_PTP_TX_TIMEOUT (HZ * 15)
 #define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
 #define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)

@@ -1667,6 +1667,7 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
 break;
 case ETHTOOL_GRXCLSRLALL:
+cmd->data = MAX_NUM_OF_FS_RULES;
 while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
 err = mlx4_en_get_flow(dev, cmd, i);
 if (!err)

@@ -883,7 +883,7 @@ static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
 struct qlcnic_adapter *adapter = netdev_priv(netdev);
 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
-return 0;
+return 1;
 switch (capid) {
 case DCB_CAP_ATTR_PG:

@@ -1320,7 +1320,8 @@ void efx_ptp_remove(struct efx_nic *efx)
 (void)efx_ptp_disable(efx);
 cancel_work_sync(&efx->ptp_data->work);
-cancel_work_sync(&efx->ptp_data->pps_work);
+if (efx->ptp_data->pps_workwq)
+cancel_work_sync(&efx->ptp_data->pps_work);
 skb_queue_purge(&efx->ptp_data->rxq);
 skb_queue_purge(&efx->ptp_data->txq);

@@ -236,7 +236,7 @@ static void ntb_netdev_tx_timer(unsigned long data)
 struct ntb_netdev *dev = netdev_priv(ndev);
 if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
-mod_timer(&dev->tx_timer, jiffies + msecs_to_jiffies(tx_time));
+mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
 } else {
 /* Make sure anybody stopping the queue after this sees the new
 * value of ntb_transport_tx_free_entry()

@@ -5484,7 +5484,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
 we have to add a spin lock... */
 rc = readBSSListRid(ai, doLoseSync, &BSSList_rid);
 while(rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) {
-ptr += sprintf(ptr, "%pM %*s rssi = %d",
+ptr += sprintf(ptr, "%pM %.*s rssi = %d",
 BSSList_rid.bssid,
 (int)BSSList_rid.ssidLen,
 BSSList_rid.ssid,

@@ -4114,7 +4114,7 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
 static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
 {
-u32 data, ko, kg;
+u32 data = 0, ko, kg;
 if (!AR_SREV_9462_20_OR_LATER(ah))
 return;

@@ -502,6 +502,7 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 }
 spin_lock_bh(&wl->lock);
+wl->wlc->vif = vif;
 wl->mute_tx = false;
 brcms_c_mute(wl->wlc, false);
 if (vif->type == NL80211_IFTYPE_STATION)
@@ -519,6 +520,11 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 static void
 brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 {
+struct brcms_info *wl = hw->priv;
+spin_lock_bh(&wl->lock);
+wl->wlc->vif = NULL;
+spin_unlock_bh(&wl->lock);
 }
 static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
@@ -840,8 +846,8 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
 status = brcms_c_aggregatable(wl->wlc, tid);
 spin_unlock_bh(&wl->lock);
 if (!status) {
-brcms_err(wl->wlc->hw->d11core,
+brcms_dbg_ht(wl->wlc->hw->d11core,
 "START: tid %d is not agg\'able\n", tid);
 return -EINVAL;
 }
 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
@@ -937,6 +943,25 @@ static void brcms_ops_set_tsf(struct ieee80211_hw *hw,
 spin_unlock_bh(&wl->lock);
 }
+static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw,
+struct ieee80211_sta *sta, bool set)
+{
+struct brcms_info *wl = hw->priv;
+struct sk_buff *beacon = NULL;
+u16 tim_offset = 0;
+spin_lock_bh(&wl->lock);
+if (wl->wlc->vif)
+beacon = ieee80211_beacon_get_tim(hw, wl->wlc->vif,
+&tim_offset, NULL);
+if (beacon)
+brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset,
+wl->wlc->vif->bss_conf.dtim_period);
+spin_unlock_bh(&wl->lock);
+return 0;
+}
 static const struct ieee80211_ops brcms_ops = {
 .tx = brcms_ops_tx,
 .start = brcms_ops_start,
@@ -955,6 +980,7 @@ static const struct ieee80211_ops brcms_ops = {
 .flush = brcms_ops_flush,
 .get_tsf = brcms_ops_get_tsf,
 .set_tsf = brcms_ops_set_tsf,
+.set_tim = brcms_ops_beacon_set_tim,
 };
 void brcms_dpc(unsigned long data)

@@ -563,6 +563,7 @@ struct brcms_c_info {
 struct wiphy *wiphy;
 struct scb pri_scb;
+struct ieee80211_vif *vif;
 struct sk_buff *beacon;
 u16 beacon_tim_offset;

@@ -365,11 +365,20 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
 struct mwifiex_power_cfg power_cfg;
 int dbm = MBM_TO_DBM(mbm);
-if (type == NL80211_TX_POWER_FIXED) {
+switch (type) {
+case NL80211_TX_POWER_FIXED:
 power_cfg.is_power_auto = 0;
+power_cfg.is_power_fixed = 1;
 power_cfg.power_level = dbm;
-} else {
+break;
+case NL80211_TX_POWER_LIMITED:
+power_cfg.is_power_auto = 0;
+power_cfg.is_power_fixed = 0;
+power_cfg.power_level = dbm;
+break;
+case NL80211_TX_POWER_AUTOMATIC:
 power_cfg.is_power_auto = 1;
+break;
 }
 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);

@@ -256,6 +256,7 @@ struct mwifiex_ds_encrypt_key {
 struct mwifiex_power_cfg {
 u32 is_power_auto;
+u32 is_power_fixed;
 u32 power_level;
 };

@@ -666,6 +666,9 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
 txp_cfg = (struct host_cmd_ds_txpwr_cfg *) buf;
 txp_cfg->action = cpu_to_le16(HostCmd_ACT_GEN_SET);
 if (!power_cfg->is_power_auto) {
+u16 dbm_min = power_cfg->is_power_fixed ?
+dbm : priv->min_tx_power_level;
 txp_cfg->mode = cpu_to_le32(1);
 pg_tlv = (struct mwifiex_types_power_group *)
 (buf + sizeof(struct host_cmd_ds_txpwr_cfg));
@@ -680,7 +683,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
 pg->last_rate_code = 0x03;
 pg->modulation_class = MOD_CLASS_HR_DSSS;
 pg->power_step = 0;
-pg->power_min = (s8) dbm;
+pg->power_min = (s8) dbm_min;
 pg->power_max = (s8) dbm;
 pg++;
 /* Power group for modulation class OFDM */
@@ -688,7 +691,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
 pg->last_rate_code = 0x07;
 pg->modulation_class = MOD_CLASS_OFDM;
 pg->power_step = 0;
-pg->power_min = (s8) dbm;
+pg->power_min = (s8) dbm_min;
 pg->power_max = (s8) dbm;
 pg++;
 /* Power group for modulation class HTBW20 */
@@ -696,7 +699,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
 pg->last_rate_code = 0x20;
 pg->modulation_class = MOD_CLASS_HT;
 pg->power_step = 0;
-pg->power_min = (s8) dbm;
+pg->power_min = (s8) dbm_min;
 pg->power_max = (s8) dbm;
 pg->ht_bandwidth = HT_BW_20;
 pg++;
@@ -705,7 +708,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
 pg->last_rate_code = 0x20;
 pg->modulation_class = MOD_CLASS_HT;
 pg->power_step = 0;
-pg->power_min = (s8) dbm;
+pg->power_min = (s8) dbm_min;
 pg->power_max = (s8) dbm;
 pg->ht_bandwidth = HT_BW_40;
 }

@@ -5331,6 +5331,7 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 break;
 case WLAN_CIPHER_SUITE_TKIP:
 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+break;
 default:
 return -EOPNOTSUPP;
 }

@@ -234,7 +234,7 @@ static int _rtl92d_fw_init(struct ieee80211_hw *hw)
 rtl_read_byte(rtlpriv, FW_MAC1_READY));
 }
 RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
-"Polling FW ready fail!! REG_MCUFWDL:0x%08ul\n",
+"Polling FW ready fail!! REG_MCUFWDL:0x%08x\n",
 rtl_read_dword(rtlpriv, REG_MCUFWDL));
 return -1;
 }

@@ -66,7 +66,7 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy,
 out:
 mutex_unlock(&wl->mutex);
-return 0;
+return ret;
 }
 static int

@@ -330,7 +330,7 @@ static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
 return 0;
 }
-static inline int ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
+static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
 {
 u64 shift, mask;

@@ -42,6 +42,7 @@
 #define PCIE_RC_K2HK 0xb008
 #define PCIE_RC_K2E 0xb009
 #define PCIE_RC_K2L 0xb00a
+#define PCIE_RC_K2G 0xb00b
 #define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
@@ -56,6 +57,8 @@ static void quirk_limit_mrrs(struct pci_dev *dev)
 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
+.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
 { 0, },
 };

@@ -967,15 +967,12 @@ enum zynq_io_standards {
 zynq_iostd_max
 };
-/**
-* enum zynq_pin_config_param - possible pin configuration parameters
-* @PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard, the argument to
+/*
+* PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard, the argument to
 * this parameter (on a custom format) tells the driver which alternative
 * IO standard to use.
 */
-enum zynq_pin_config_param {
-PIN_CONFIG_IOSTANDARD = PIN_CONFIG_END + 1,
-};
+#define PIN_CONFIG_IOSTANDARD (PIN_CONFIG_END + 1)
 static const struct pinconf_generic_params zynq_dt_params[] = {
 {"io-standard", PIN_CONFIG_IOSTANDARD, zynq_iostd_lvcmos18},

@@ -793,10 +793,23 @@ static int pmic_gpio_probe(struct platform_device *pdev)
 goto err_chip;
 }
-ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins);
-if (ret) {
-dev_err(dev, "failed to add pin range\n");
-goto err_range;
+/*
+* For DeviceTree-supported systems, the gpio core checks the
+* pinctrl's device node for the "gpio-ranges" property.
+* If it is present, it takes care of adding the pin ranges
+* for the driver. In this case the driver can skip ahead.
+*
+* In order to remain compatible with older, existing DeviceTree
+* files which don't set the "gpio-ranges" property or systems that
+* utilize ACPI the driver has to call gpiochip_add_pin_range().
+*/
+if (!of_property_read_bool(dev->of_node, "gpio-ranges")) {
+ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0,
+npins);
+if (ret) {
+dev_err(dev, "failed to add pin range\n");
+goto err_range;
+}
 }
 return 0;

@@ -566,6 +566,7 @@ config ASUS_WMI
 config ASUS_NB_WMI
 tristate "Asus Notebook WMI Driver"
 depends on ASUS_WMI
+depends on SERIO_I8042 || SERIO_I8042 = n
 ---help---
 This is a driver for newer Asus notebooks. It adds extra features
 like wireless radio and bluetooth control, leds, hotkeys, backlight...

@@ -27,6 +27,7 @@
 #include <linux/input/sparse-keymap.h>
 #include <linux/fb.h>
 #include <linux/dmi.h>
+#include <linux/i8042.h>
 #include "asus-wmi.h"
@@ -55,8 +56,34 @@ MODULE_PARM_DESC(wapf, "WAPF value");
 static struct quirk_entry *quirks;
+static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
+struct serio *port)
+{
+static bool extended;
+bool ret = false;
+if (str & I8042_STR_AUXDATA)
+return false;
+if (unlikely(data == 0xe1)) {
+extended = true;
+ret = true;
+} else if (unlikely(extended)) {
+extended = false;
+ret = true;
+}
+return ret;
+}
 static struct quirk_entry quirk_asus_unknown = {
 .wapf = 0,
+.wmi_backlight_set_devstate = true,
+};
+static struct quirk_entry quirk_asus_q500a = {
+.i8042_filter = asus_q500a_i8042_filter,
+.wmi_backlight_set_devstate = true,
 };
 /*
@@ -67,15 +94,42 @@ static struct quirk_entry quirk_asus_unknown = {
 static struct quirk_entry quirk_asus_x55u = {
 .wapf = 4,
 .wmi_backlight_power = true,
+.wmi_backlight_set_devstate = true,
 .no_display_toggle = true,
 };
 static struct quirk_entry quirk_asus_wapf4 = {
 .wapf = 4,
+.wmi_backlight_set_devstate = true,
 };
 static struct quirk_entry quirk_asus_x200ca = {
 .wapf = 2,
+.wmi_backlight_set_devstate = true,
+};
+static struct quirk_entry quirk_no_rfkill = {
+.no_rfkill = true,
+};
+static struct quirk_entry quirk_no_rfkill_wapf4 = {
+.wapf = 4,
+.no_rfkill = true,
+};
+static struct quirk_entry quirk_asus_ux303ub = {
+.wmi_backlight_native = true,
+.wmi_backlight_set_devstate = true,
+};
+static struct quirk_entry quirk_asus_x550lb = {
+.wmi_backlight_set_devstate = true,
+.xusb2pr = 0x01D9,
+};
+static struct quirk_entry quirk_asus_forceals = {
+.wmi_backlight_set_devstate = true,
+.wmi_force_als_set = true,
 };
 static int dmi_matched(const struct dmi_system_id *dmi)
@@ -85,6 +139,15 @@ static int dmi_matched(const struct dmi_system_id *dmi)
 }
 static const struct dmi_system_id asus_quirks[] = {
+{
+.callback = dmi_matched,
+.ident = "ASUSTeK COMPUTER INC. Q500A",
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+DMI_MATCH(DMI_PRODUCT_NAME, "Q500A"),
+},
+.driver_data = &quirk_asus_q500a,
+},
 {
 .callback = dmi_matched,
 .ident = "ASUSTeK COMPUTER INC. U32U",
@@ -160,7 +223,7 @@ static const struct dmi_system_id asus_quirks[] = {
 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
 DMI_MATCH(DMI_PRODUCT_NAME, "X456UF"),
 },
-.driver_data = &quirk_asus_wapf4,
+.driver_data = &quirk_no_rfkill_wapf4,
 },
 {
 .callback = dmi_matched,
@@ -315,11 +378,85 @@ static const struct dmi_system_id asus_quirks[] = {
 },
 .driver_data = &quirk_asus_x200ca,
 },
+{
+.callback = dmi_matched,
+.ident = "ASUSTeK COMPUTER INC. X555UB",
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+DMI_MATCH(DMI_PRODUCT_NAME, "X555UB"),
+},
+.driver_data = &quirk_no_rfkill,
+},
+{
+.callback = dmi_matched,
+.ident = "ASUSTeK COMPUTER INC. N552VW",
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+DMI_MATCH(DMI_PRODUCT_NAME, "N552VW"),
+},
+.driver_data = &quirk_no_rfkill,
+},
+{
+.callback = dmi_matched,
+.ident = "ASUSTeK COMPUTER INC. U303LB",
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+DMI_MATCH(DMI_PRODUCT_NAME, "U303LB"),
+},
+.driver_data = &quirk_no_rfkill,
+},
+{
+.callback = dmi_matched,
+.ident = "ASUSTeK COMPUTER INC. Z550MA",
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+DMI_MATCH(DMI_PRODUCT_NAME, "Z550MA"),
+},
+.driver_data = &quirk_no_rfkill,
+},
+{
+.callback = dmi_matched,
+.ident = "ASUSTeK COMPUTER INC. UX303UB",
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+DMI_MATCH(DMI_PRODUCT_NAME, "UX303UB"),
+},
+.driver_data = &quirk_asus_ux303ub,
+},
+{
+.callback = dmi_matched,
+.ident = "ASUSTeK COMPUTER INC. UX330UAK",
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+DMI_MATCH(DMI_PRODUCT_NAME, "UX330UAK"),
+},
+.driver_data = &quirk_asus_forceals,
+},
+{
+.callback = dmi_matched,
+.ident = "ASUSTeK COMPUTER INC. X550LB",
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+DMI_MATCH(DMI_PRODUCT_NAME, "X550LB"),
+},
+.driver_data = &quirk_asus_x550lb,
+},
+{
+.callback = dmi_matched,
+.ident = "ASUSTeK COMPUTER INC. UX430UQ",
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+DMI_MATCH(DMI_PRODUCT_NAME, "UX430UQ"),
+},
+.driver_data = &quirk_asus_forceals,
+},
 {},
 };
 static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
 {
+int ret;
 quirks = &quirk_asus_unknown;
 dmi_check_system(asus_quirks);
@@ -331,6 +468,15 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
 quirks->wapf = wapf;
 else
 wapf = quirks->wapf;
+if (quirks->i8042_filter) {
+ret = i8042_install_filter(quirks->i8042_filter);
+if (ret) {
+pr_warn("Unable to install key filter\n");
+return;
+}
+pr_info("Using i8042 filter function for receiving events\n");
+}
 }
 static const struct key_entry asus_nb_wmi_keymap[] = {

@@ -117,6 +117,7 @@ MODULE_LICENSE("GPL");
 #define ASUS_WMI_DEVID_LED6 0x00020016
 /* Backlight and Brightness */
+#define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */
 #define ASUS_WMI_DEVID_BACKLIGHT 0x00050011
 #define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012
 #define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
@@ -158,6 +159,9 @@ MODULE_LICENSE("GPL");
 #define ASUS_FAN_CTRL_MANUAL 1
 #define ASUS_FAN_CTRL_AUTO 2
+#define USB_INTEL_XUSB2PR 0xD0
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
 struct bios_args {
 u32 arg0;
 u32 arg1;
@@ -1082,6 +1086,38 @@ exit:
 return result;
 }
+static void asus_wmi_set_xusb2pr(struct asus_wmi *asus)
+{
+struct pci_dev *xhci_pdev;
+u32 orig_ports_available;
+u32 ports_available = asus->driver->quirks->xusb2pr;
+xhci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI,
+NULL);
+if (!xhci_pdev)
+return;
+pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+&orig_ports_available);
+pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+cpu_to_le32(ports_available));
+pr_info("set USB_INTEL_XUSB2PR old: 0x%04x, new: 0x%04x\n",
+orig_ports_available, ports_available);
+}
+/*
+* Some devices dont support or have borcken get_als method
+* but still support set method.
+*/
+static void asus_wmi_set_als(void)
+{
+asus_wmi_set_devstate(ASUS_WMI_DEVID_ALS_ENABLE, 1, NULL);
+}
 /*
 * Hwmon device
 */
@@ -1733,6 +1769,7 @@ ASUS_WMI_CREATE_DEVICE_ATTR(touchpad, 0644, ASUS_WMI_DEVID_TOUCHPAD);
 ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA);
 ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER);
 ASUS_WMI_CREATE_DEVICE_ATTR(lid_resume, 0644, ASUS_WMI_DEVID_LID_RESUME);
+ASUS_WMI_CREATE_DEVICE_ATTR(als_enable, 0644, ASUS_WMI_DEVID_ALS_ENABLE);
 static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
 const char *buf, size_t count)
@@ -1759,6 +1796,7 @@ static struct attribute *platform_attributes[] = {
 &dev_attr_cardr.attr,
 &dev_attr_touchpad.attr,
 &dev_attr_lid_resume.attr,
+&dev_attr_als_enable.attr,
 NULL
 };
@@ -1779,6 +1817,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
 devid = ASUS_WMI_DEVID_TOUCHPAD;
 else if (attr == &dev_attr_lid_resume.attr)
 devid = ASUS_WMI_DEVID_LID_RESUME;
+else if (attr == &dev_attr_als_enable.attr)
+devid = ASUS_WMI_DEVID_ALS_ENABLE;
 if (devid != -1)
 ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
@@ -2067,9 +2107,14 @@ static int asus_wmi_add(struct platform_device *pdev)
 if (err)
 goto fail_leds;
-err = asus_wmi_rfkill_init(asus);
-if (err)
-goto fail_rfkill;
+if (!asus->driver->quirks->no_rfkill) {
+err = asus_wmi_rfkill_init(asus);
+if (err)
+goto fail_rfkill;
+}
+if (asus->driver->quirks->wmi_force_als_set)
+asus_wmi_set_als();
 /* Some Asus desktop boards export an acpi-video backlight interface,
 stop this from showing up */
@@ -2080,11 +2125,17 @@ static int asus_wmi_add(struct platform_device *pdev)
 if (asus->driver->quirks->wmi_backlight_power)
 acpi_video_set_dmi_backlight_type(acpi_backlight_vendor);
+if (asus->driver->quirks->wmi_backlight_native)
+acpi_video_set_dmi_backlight_type(acpi_backlight_native);
+if (asus->driver->quirks->xusb2pr)
+asus_wmi_set_xusb2pr(asus);
 if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
 err = asus_wmi_backlight_init(asus);
 if (err && err != -ENODEV)
 goto fail_backlight;
-} else
+} else if (asus->driver->quirks->wmi_backlight_set_devstate)
 err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
 status = wmi_install_notify_handler(asus->driver->event_guid,

@@ -28,6 +28,7 @@
 #define _ASUS_WMI_H_
 #include <linux/platform_device.h>
+#include <linux/i8042.h>
 #define ASUS_WMI_KEY_IGNORE (-1)
 #define ASUS_WMI_BRN_DOWN 0x20
@@ -38,10 +39,14 @@ struct key_entry;
 struct asus_wmi;
 struct quirk_entry {
+bool no_rfkill;
 bool hotplug_wireless;
 bool scalar_panel_brightness;
 bool store_backlight_power;
 bool wmi_backlight_power;
+bool wmi_backlight_native;
+bool wmi_backlight_set_devstate;
+bool wmi_force_als_set;
 int wapf;
 /*
 * For machines with AMD graphic chips, it will send out WMI event
@@ -50,6 +55,10 @@ struct quirk_entry {
 * and let the ACPI interrupt to send out the key event.
 */
 int no_display_toggle;
+u32 xusb2pr;
+bool (*i8042_filter)(unsigned char data, unsigned char str,
+struct serio *serio);
 };
 struct asus_wmi_driver {

@@ -106,7 +106,7 @@ static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len)
 */
 static int s35390a_reset(struct s35390a *s35390a, char *status1)
 {
-char buf;
+u8 buf;
 int ret;
 unsigned initcount = 0;

@@ -1972,6 +1972,11 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
 xferred -= psge->length;
 } else {
 /* Partial SG entry done */
+pci_dma_sync_single_for_cpu(srb->dcb->
+acb->dev,
+srb->sg_bus_addr,
+SEGMENTX_LEN,
+PCI_DMA_TODEVICE);
 psge->length -= xferred;
 psge->address += xferred;
 srb->sg_index = idx;
@@ -3450,14 +3455,12 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
 }
 }
-if (dir != PCI_DMA_NONE && scsi_sg_count(cmd))
-pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd),
-scsi_sg_count(cmd), dir);
 ckc_only = 0;
 /* Check Error Conditions */
 ckc_e:
+pci_unmap_srb(acb, srb);
 if (cmd->cmnd[0] == INQUIRY) {
 unsigned char *base = NULL;
 struct ScsiInqData *ptr;
@@ -3511,7 +3514,6 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
 cmd, cmd->result);
 srb_free_insert(acb, srb);
 }
-pci_unmap_srb(acb, srb);
 cmd->scsi_done(cmd);
 waiting_process_next(acb);

@@ -3500,6 +3500,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
 case START_STOP:
 scb->scsi_cmd->result = DID_OK << 16;
+break;
 case TEST_UNIT_READY:
 case INQUIRY:

@@ -2717,9 +2717,9 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
 * the task management request.
 * @task_request: the handle to the task request object to start.
 */
-enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
+enum sci_status sci_controller_start_task(struct isci_host *ihost,
 struct isci_remote_device *idev,
 struct isci_request *ireq)
 {
 enum sci_status status;
@@ -2728,7 +2728,7 @@ enum sci_status sci_controller_start_task(struct isci_host *ihost,
 "%s: SCIC Controller starting task from invalid "
 "state\n",
 __func__);
-return SCI_TASK_FAILURE_INVALID_STATE;
+return SCI_FAILURE_INVALID_STATE;
 }
 status = sci_remote_device_start_task(ihost, idev, ireq);

@@ -490,7 +490,7 @@ enum sci_status sci_controller_start_io(
 struct isci_remote_device *idev,
 struct isci_request *ireq);
-enum sci_task_status sci_controller_start_task(
+enum sci_status sci_controller_start_task(
 struct isci_host *ihost,
 struct isci_remote_device *idev,
 struct isci_request *ireq);

@@ -1626,9 +1626,9 @@ static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
 if (status == SCI_SUCCESS) {
 if (ireq->stp.rsp.status & ATA_ERR)
-status = SCI_IO_FAILURE_RESPONSE_VALID;
+status = SCI_FAILURE_IO_RESPONSE_VALID;
 } else {
-status = SCI_IO_FAILURE_RESPONSE_VALID;
+status = SCI_FAILURE_IO_RESPONSE_VALID;
 }
 if (status != SCI_SUCCESS) {

@@ -258,7 +258,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
 struct isci_tmf *tmf, unsigned long timeout_ms)
 {
 DECLARE_COMPLETION_ONSTACK(completion);
-enum sci_task_status status = SCI_TASK_FAILURE;
+enum sci_status status = SCI_FAILURE;
 struct isci_request *ireq;
 int ret = TMF_RESP_FUNC_FAILED;
 unsigned long flags;
@@ -301,7 +301,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
 /* start the TMF io. */
 status = sci_controller_start_task(ihost, idev, ireq);
-if (status != SCI_TASK_SUCCESS) {
+if (status != SCI_SUCCESS) {
 dev_dbg(&ihost->pdev->dev,
 "%s: start_io failed - status = 0x%x, request = %p\n",
 __func__,

@@ -788,7 +788,8 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
 return rc;
 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
-&addr, param, buf);
+&addr,
+(enum iscsi_param)param, buf);
 default:
 return iscsi_host_get_param(shost, param, buf);
 }

@@ -1124,6 +1124,7 @@ stop_rr_fcf_flogi:
 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
 spin_unlock_irq(&phba->hbalock);
+phba->fcf.fcf_redisc_attempted = 0; /* reset */
 goto out;
 }
 if (!rc) {
@@ -1138,6 +1139,7 @@ stop_rr_fcf_flogi:
 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
 spin_unlock_irq(&phba->hbalock);
+phba->fcf.fcf_redisc_attempted = 0; /* reset */
 goto out;
 }
 }

@@ -1966,6 +1966,26 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
 "failover and change port state:x%x/x%x\n",
 phba->pport->port_state, LPFC_VPORT_UNKNOWN);
 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+if (!phba->fcf.fcf_redisc_attempted) {
+lpfc_unregister_fcf(phba);
+rc = lpfc_sli4_redisc_fcf_table(phba);
+if (!rc) {
+lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+"3195 Rediscover FCF table\n");
+phba->fcf.fcf_redisc_attempted = 1;
+lpfc_sli4_clear_fcf_rr_bmask(phba);
+} else {
+lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+"3196 Rediscover FCF table "
+"failed. Status:x%x\n", rc);
+}
+} else {
+lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+"3197 Already rediscover FCF table "
+"attempted. No more retry\n");
+}
 goto stop_flogi_current_fcf;
 } else {
 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,

@@ -4376,7 +4376,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
 break;
 }
 /* If fast FCF failover rescan event is pending, do nothing */
-if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
+if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
 spin_unlock_irq(&phba->hbalock);
 break;
 }

@@ -16350,15 +16350,8 @@ next_priority:
 goto initial_priority;
 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
 "2844 No roundrobin failover FCF available\n");
-if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
 return LPFC_FCOE_FCF_NEXT_NONE;
-else {
-lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
-"3063 Only FCF available idx %d, flag %x\n",
-next_fcf_index,
-phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
-return next_fcf_index;
-}
 }
 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&

@@ -237,6 +237,7 @@ struct lpfc_fcf {
 #define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
 #define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
 #define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
+uint16_t fcf_redisc_attempted;
 uint32_t addr_mode;
 uint32_t eligible_fcf_cnt;
 struct lpfc_fcf_rec current_rec;

@@ -3585,12 +3585,12 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
 /*
 * The cur_state should not last for more than max_wait secs
 */
-for (i = 0; i < (max_wait * 1000); i++) {
+for (i = 0; i < max_wait; i++) {
 curr_abs_state = instance->instancet->
 read_fw_status_reg(instance->reg_set);
 if (abs_state == curr_abs_state) {
-msleep(1);
+msleep(1000);
 } else
 break;
 }

@@ -677,10 +677,6 @@ mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
 r = _config_request(ioc, &mpi_request, mpi_reply,
 MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
 sizeof(*config_page));
-mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
-r = _config_request(ioc, &mpi_request, mpi_reply,
-MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
-sizeof(*config_page));
 out:
 return r;
 }

@@ -3254,6 +3254,40 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 return _scsih_check_for_pending_tm(ioc, smid);
 }
+/** _scsih_allow_scmd_to_device - check whether scmd needs to
+* issue to IOC or not.
+* @ioc: per adapter object
+* @scmd: pointer to scsi command object
+*
+* Returns true if scmd can be issued to IOC otherwise returns false.
+*/
+inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
+struct scsi_cmnd *scmd)
+{
+if (ioc->pci_error_recovery)
+return false;
+if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
+if (ioc->remove_host)
+return false;
+return true;
+}
+if (ioc->remove_host) {
+switch (scmd->cmnd[0]) {
+case SYNCHRONIZE_CACHE:
+case START_STOP:
+return true;
+default:
+return false;
+}
+}
+return true;
+}
 /**
 * _scsih_sas_control_complete - completion routine
@@ -3880,7 +3914,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 return 0;
 }
-if (ioc->pci_error_recovery || ioc->remove_host) {
+if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
 scmd->result = DID_NO_CONNECT << 16;
 scmd->scsi_done(scmd);
 return 0;

@@ -301,7 +301,7 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
 struct omap2_mcspi_cs *cs = spi->controller_state;
 struct omap2_mcspi *mcspi;
 unsigned int wcnt;
-int max_fifo_depth, fifo_depth, bytes_per_word;
+int max_fifo_depth, bytes_per_word;
 u32 chconf, xferlevel;
 mcspi = spi_master_get_devdata(master);
@@ -317,10 +317,6 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
 else
 max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
-fifo_depth = gcd(t->len, max_fifo_depth);
-if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0)
-goto disable_fifo;
 wcnt = t->len / bytes_per_word;
 if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
 goto disable_fifo;
@@ -328,16 +324,17 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
 xferlevel = wcnt << 16;
 if (t->rx_buf != NULL) {
 chconf |= OMAP2_MCSPI_CHCONF_FFER;
-xferlevel |= (fifo_depth - 1) << 8;
+xferlevel |= (bytes_per_word - 1) << 8;
 }
 if (t->tx_buf != NULL) {
 chconf |= OMAP2_MCSPI_CHCONF_FFET;
-xferlevel |= fifo_depth - 1;
+xferlevel |= bytes_per_word - 1;
 }
 mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
 mcspi_write_chconf0(spi, chconf);
-mcspi->fifo_depth = fifo_depth;
+mcspi->fifo_depth = max_fifo_depth;
 return;
 }
@@ -569,7 +566,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 struct dma_slave_config cfg;
 enum dma_slave_buswidth width;
 unsigned es;
-u32 burst;
 void __iomem *chstat_reg;
 void __iomem *irqstat_reg;
 int wait_res;
@@ -591,22 +587,14 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 }
 count = xfer->len;
-burst = 1;
-if (mcspi->fifo_depth > 0) {
-if (count > mcspi->fifo_depth)
-burst = mcspi->fifo_depth / es;
-else
-burst = count / es;
-}
 memset(&cfg, 0, sizeof(cfg));
 cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
 cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
 cfg.src_addr_width = width;
 cfg.dst_addr_width = width;
-cfg.src_maxburst = burst;
-cfg.dst_maxburst = burst;
+cfg.src_maxburst = 1;
+cfg.dst_maxburst = 1;
 rx = xfer->rx_buf;
 tx = xfer->tx_buf;

@@ -1198,8 +1198,8 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
 i = platform_get_irq(pdev, 0);
 if (i < 0) {
-dev_err(&pdev->dev, "cannot get platform IRQ\n");
-ret = -ENOENT;
+dev_err(&pdev->dev, "cannot get IRQ\n");
+ret = i;
 goto err1;
 }

@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2004-2014 Bernd Porr, mail@berndporr.me.uk
+* Copyright (C) 2004-2019 Bernd Porr, mail@berndporr.me.uk
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -17,7 +17,7 @@
 * Description: University of Stirling USB DAQ & INCITE Technology Limited
 * Devices: [ITL] USB-DUX-FAST (usbduxfast)
 * Author: Bernd Porr <mail@berndporr.me.uk>
-* Updated: 10 Oct 2014
+* Updated: 16 Nov 2019
 * Status: stable
 */
@@ -31,6 +31,7 @@
 *
 *
 * Revision history:
+* 1.0: Fixed a rounding error in usbduxfast_ai_cmdtest
 * 0.9: Dropping the first data packet which seems to be from the last transfer.
 * Buffer overflows in the FX2 are handed over to comedi.
 * 0.92: Dropping now 4 packets. The quad buffer has to be emptied.
@@ -359,6 +360,7 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
 struct comedi_cmd *cmd)
 {
 int err = 0;
+int err2 = 0;
 unsigned int steps;
 unsigned int arg;
@@ -408,11 +410,16 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
 */
 steps = (cmd->convert_arg * 30) / 1000;
 if (cmd->chanlist_len != 1)
-err |= comedi_check_trigger_arg_min(&steps,
+err2 |= comedi_check_trigger_arg_min(&steps,
 MIN_SAMPLING_PERIOD);
-err |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
-arg = (steps * 1000) / 30;
-err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
+else
+err2 |= comedi_check_trigger_arg_min(&steps, 1);
+err2 |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
+if (err2) {
+err |= err2;
+arg = (steps * 1000) / 30;
+err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
+}
 if (cmd->stop_src == TRIG_COUNT)
 err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);

@@ -389,7 +389,8 @@ int pcie_speeds(struct hfi1_devdata *dd)
 /*
 * bus->max_bus_speed is set from the bridge's linkcap Max Link Speed
 */
-if (dd->pcidev->bus->max_bus_speed != PCIE_SPEED_8_0GT) {
+if (dd->pcidev->bus->max_bus_speed == PCIE_SPEED_2_5GT ||
+dd->pcidev->bus->max_bus_speed == PCIE_SPEED_5_0GT) {
 dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n");
 dd->link_gen3_capable = 0;
 }

@@ -350,8 +350,8 @@ static irqreturn_t rcar_thermal_irq(int irq, void *data)
 rcar_thermal_for_each_priv(priv, common) {
 if (rcar_thermal_had_changed(priv, status)) {
 rcar_thermal_irq_disable(priv);
-schedule_delayed_work(&priv->work,
+queue_delayed_work(system_freezable_wq, &priv->work,
 msecs_to_jiffies(300));
 }
 }

@@ -1192,14 +1192,13 @@ static long slgt_compat_ioctl(struct tty_struct *tty,
 unsigned int cmd, unsigned long arg)
 {
 struct slgt_info *info = tty->driver_data;
-int rc = -ENOIOCTLCMD;
+int rc;
 if (sanity_check(info, tty->name, "compat_ioctl"))
 return -ENODEV;
 DBGINFO(("%s compat_ioctl() cmd=%08X\n", info->device_name, cmd));
 switch (cmd) {
 case MGSL_IOCSPARAMS32:
 rc = set_params32(info, compat_ptr(arg));
 break;
@@ -1219,18 +1218,11 @@ static long slgt_compat_ioctl(struct tty_struct *tty,
 case MGSL_IOCWAITGPIO:
 case MGSL_IOCGXSYNC:
 case MGSL_IOCGXCTRL:
-case MGSL_IOCSTXIDLE:
-case MGSL_IOCTXENABLE:
-case MGSL_IOCRXENABLE:
-case MGSL_IOCTXABORT:
-case TIOCMIWAIT:
-case MGSL_IOCSIF:
-case MGSL_IOCSXSYNC:
-case MGSL_IOCSXCTRL:
-rc = ioctl(tty, cmd, arg);
+rc = ioctl(tty, cmd, (unsigned long)compat_ptr(arg));
 break;
+default:
+rc = ioctl(tty, cmd, arg);
 }
 DBGINFO(("%s compat_ioctl() cmd=%08X rc=%d\n", info->device_name, cmd, rc));
 return rc;
 }

@@ -161,8 +161,11 @@ static int appledisplay_bl_update_status(struct backlight_device *bd)
 pdata->msgdata, 2,
 ACD_USB_TIMEOUT);
 mutex_unlock(&pdata->sysfslock);
-return retval;
+if (retval < 0)
+return retval;
+else
+return 0;
 }
 static int appledisplay_bl_get_brightness(struct backlight_device *bd)
@@ -180,7 +183,12 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
 0,
 pdata->msgdata, 2,
 ACD_USB_TIMEOUT);
-brightness = pdata->msgdata[1];
+if (retval < 2) {
+if (retval >= 0)
+retval = -EMSGSIZE;
+} else {
+brightness = pdata->msgdata[1];
+}
 mutex_unlock(&pdata->sysfslock);
 if (retval < 0)
@@ -326,6 +334,7 @@ error:
 if (pdata) {
 if (pdata->urb) {
 usb_kill_urb(pdata->urb);
+cancel_delayed_work_sync(&pdata->work);
 if (pdata->urbdata)
 usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
 pdata->urbdata, pdata->urb->transfer_dma);

@@ -121,6 +121,7 @@ static const struct usb_device_id id_table[] = {
 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
 { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
 { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
+{ USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
 { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
 { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
 { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */

@@ -1941,10 +1941,6 @@ static int mos7720_startup(struct usb_serial *serial)
                 }
         }
-        /* setting configuration feature to one */
-        usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
-                        (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
 #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
         if (product == MOSCHIP_DEVICE_ID_7715) {
                 ret_val = mos7715_parport_init(serial);

@@ -131,11 +131,15 @@
 /* This driver also supports
  * ATEN UC2324 device using Moschip MCS7840
  * ATEN UC2322 device using Moschip MCS7820
+ * MOXA UPort 2210 device using Moschip MCS7820
  */
 #define USB_VENDOR_ID_ATENINTL 0x0557
 #define ATENINTL_DEVICE_ID_UC2324 0x2011
 #define ATENINTL_DEVICE_ID_UC2322 0x7820
+#define USB_VENDOR_ID_MOXA 0x110a
+#define MOXA_DEVICE_ID_2210 0x2210
 /* Interrupt Routine Defines */
 #define SERIAL_IIR_RLS 0x06
@@ -206,6 +210,7 @@ static const struct usb_device_id id_table[] = {
         {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
         {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
         {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
+        {USB_DEVICE(USB_VENDOR_ID_MOXA, MOXA_DEVICE_ID_2210)},
         {} /* terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, id_table);
@@ -2089,6 +2094,7 @@ static int mos7840_probe(struct usb_serial *serial,
                          const struct usb_device_id *id)
 {
         u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
+        u16 vid = le16_to_cpu(serial->dev->descriptor.idVendor);
         u8 *buf;
         int device_type;
@@ -2098,6 +2104,11 @@ static int mos7840_probe(struct usb_serial *serial,
                 goto out;
         }
+        if (vid == USB_VENDOR_ID_MOXA && product == MOXA_DEVICE_ID_2210) {
+                device_type = MOSCHIP_DEVICE_ID_7820;
+                goto out;
+        }
         buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
         if (!buf)
                 return -ENOMEM;
@@ -2350,11 +2361,6 @@ out:
                         goto error;
                 } else
                         dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status);
-                /* setting configuration feature to one */
-                usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
-                                0x03, 0x00, 0x01, 0x00, NULL, 0x00,
-                                MOS_WDR_TIMEOUT);
         }
         return 0;
 error:

@@ -200,6 +200,7 @@ static void option_instat_callback(struct urb *urb);
 #define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
 #define DELL_PRODUCT_5821E 0x81d7
+#define DELL_PRODUCT_5821E_ESIM 0x81e0
 #define KYOCERA_VENDOR_ID 0x0c88
 #define KYOCERA_PRODUCT_KPC650 0x17da
@@ -1043,6 +1044,8 @@ static const struct usb_device_id option_ids[] = {
         { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
         { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E),
           .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+        { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
+          .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
         { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
         { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
         { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -1987,6 +1990,10 @@ static const struct usb_device_id option_ids[] = {
         { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
         { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
         { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
+        { USB_DEVICE(0x0489, 0xe0b4), /* Foxconn T77W968 */
+          .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+        { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
+          .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
         { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
           .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
         { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */

@@ -2966,6 +2966,10 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
 again:
         b = get_old_root(root, time_seq);
+        if (!b) {
+                ret = -EIO;
+                goto done;
+        }
         level = btrfs_header_level(b);
         p->locks[level] = BTRFS_READ_LOCK;

@@ -1515,7 +1515,6 @@ retry_lookup:
                 if (IS_ERR(realdn)) {
                         err = PTR_ERR(realdn);
                         d_drop(dn);
-                        dn = NULL;
                         goto next_item;
                 }
                 dn = realdn;

@@ -683,7 +683,7 @@ int dlm_ls_start(struct dlm_ls *ls)
         error = dlm_config_nodes(ls->ls_name, &nodes, &count);
         if (error < 0)
-                goto fail;
+                goto fail_rv;
         spin_lock(&ls->ls_recover_lock);
@@ -715,8 +715,9 @@ int dlm_ls_start(struct dlm_ls *ls)
         return 0;
 fail:
-        kfree(rv);
         kfree(nodes);
+fail_rv:
+        kfree(rv);
         return error;
 }

@@ -705,7 +705,7 @@ static int copy_result_to_user(struct dlm_user_args *ua, int compat,
         result.version[0] = DLM_DEVICE_VERSION_MAJOR;
         result.version[1] = DLM_DEVICE_VERSION_MINOR;
         result.version[2] = DLM_DEVICE_VERSION_PATCH;
-        memcpy(&result.lksb, &ua->lksb, sizeof(struct dlm_lksb));
+        memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr));
         result.user_lksb = ua->user_lksb;
         /* FIXME: dlm1 provides for the user's bastparam/addr to not be updated

@@ -645,7 +645,10 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
         RB_CLEAR_NODE(&rs->rs_node);
         if (rs->rs_free) {
-                struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
+                u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) +
+                                 rs->rs_free - 1;
+                struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, };
+                struct gfs2_bitmap *start, *last;
                 /* return reserved blocks to the rgrp */
                 BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
@@ -656,7 +659,13 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
                    it will force the number to be recalculated later. */
                 rgd->rd_extfail_pt += rs->rs_free;
                 rs->rs_free = 0;
-                clear_bit(GBF_FULL, &bi->bi_flags);
+                if (gfs2_rbm_from_block(&last_rbm, last_block))
+                        return;
+                start = rbm_bi(&rs->rs_rbm);
+                last = rbm_bi(&last_rbm);
+                do
+                        clear_bit(GBF_FULL, &start->bi_flags);
+                while (start++ != last);
         }
 }
