This is the 4.4.166 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlwCSnEACgkQONu9yGCS
 aT6tLg//bXn9+eEGojqXMUxdjbEDheFOjoQ8NmNFmwYjN53N5BJrf9+zkrOEyjOM
 yPlrp55WHaen7qEtk+WHMw4cMscelXF2sFcNz6F8/PXcHawzO8w0FGyYf9eZ7e+a
 T6biup71w+2JS29H6nD+p/g4l9eZsPx6Din3lGDwJ8CCwaMGb0UUglEU+nt+LI57
 9bEbRqqez+Sx9aPf5oOZ3/hwgZjJ+kvDS9bjohBmjC9iEeLhIRJzcIBJqfDa16+N
 Ra3hShWHkhin7k3YcyIja6BIxzKSgFyxAUilEMPdZToB8cwnj+mIeYvSSBSJqdoz
 E/TXrXCt0jAMdfR4R57LJr46FCmKj/PV9svQaXGvULc7c91KfeTl8LzA5mZiRQ3K
 jHX6FFuGxx1y1tXjHmZKZJCKA2fjuS4TPgF8uJov7IYkur0+GKGr5D8xIXwyU4r6
 dbo8hUdqBLXChV+dbvVjex9Gj7rNST2fz3Rk/hlEyxMqusqvpPe3/3m1dqlZR6Z8
 LyEqWFP/UnVH/ZG3Mv/UBJMLHnpT2dhxIy+tdUGCMYtw9LLr+UUdUhGafeuBXg+T
 cPJUs3gEGKMuJM4ZWpK0cWZk5xqvF6NpGOipVVSKSeWl2YL6NFMRBWVn9ghziS0f
 +/g2h34xRxJAWgBe8FI9KrUihuTmV4E+C77M3pQwd/R/vwCCz2M=
 =QCXc
 -----END PGP SIGNATURE-----

Merge 4.4.166 into android-4.4

Changes in 4.4.166
	usb: core: Fix hub port connection events lost
	usb: xhci: fix timeout for transition from RExit to U0
	MAINTAINERS: Add Sasha as a stable branch maintainer
	iwlwifi: mvm: support sta_statistics() even on older firmware
	v9fs_dir_readdir: fix double-free on p9stat_read error
	bfs: add sanity check at bfs_fill_super()
	sctp: clear the transport of some out_chunk_list chunks in sctp_assoc_rm_peer
	gfs2: Don't leave s_fs_info pointing to freed memory in init_sbd
	llc: do not use sk_eat_skb()
	drm/ast: change resolution may cause screen blurred
	drm/ast: fixed cursor may disappear sometimes
	can: dev: can_get_echo_skb(): factor out non sending code to __can_get_echo_skb()
	can: dev: __can_get_echo_skb(): replace struct can_frame by canfd_frame to access frame length
	can: dev: __can_get_echo_skb(): Don't crash the kernel if can_priv::echo_skb is accessed out of bounds
	can: dev: __can_get_echo_skb(): print error message, if trying to echo non existing skb
	usb: xhci: Prevent bus suspend if a port connect change or polling state is detected
	KVM: PPC: Move and undef TRACE_INCLUDE_PATH/FILE
	cpufreq: imx6q: add return value check for voltage scale
	SUNRPC: Fix a bogus get/put in generic_key_to_expire()
	kdb: Use strscpy with destination buffer size
	powerpc/numa: Suppress "VPHN is not supported" messages
	tmpfs: make lseek(SEEK_DATA/SEEK_HOLE) return ENXIO with a negative offset
	of: add helper to lookup compatible child node
	NFC: nfcmrvl_uart: fix OF child-node lookup
	net: bcmgenet: fix OF child-node lookup
	x86/entry: spell EBX register correctly in documentation
	x86/entry/64: Remove %ebx handling from error_entry/exit
	arm64: remove no-op -p linker flag
	ath10k: fix kernel panic due to race in accessing arvif list
	Input: xpad - remove spurious events of wireless xpad 360 controller
	Input: xpad - handle "present" and "gone" correctly
	Input: xpad - update Xbox One Force Feedback Support
	Input: xpad - workaround dead irq_out after suspend/ resume
	Input: xpad - use LED API when identifying wireless controllers
	Input: xpad - correct xbox one pad device name
	Input: xpad - remove unused function
	Input: xpad - add Mad Catz FightStick TE 2 VID/PID
	Input: xpad - prevent spurious input from wired Xbox 360 controllers
	Input: xpad - add more third-party controllers
	Input: xpad - xbox one elite controller support
	Input: xpad - fix rumble on Xbox One controllers with 2015 firmware
	Input: xpad - power off wireless 360 controllers on suspend
	Input: xpad - add product ID for Xbox One S pad
	Input: xpad - fix Xbox One rumble stopping after 2.5 secs
	Input: xpad - correctly sort vendor id's
	Input: xpad - move reporting xbox one home button to common function
	Input: xpad - simplify error condition in init_output
	Input: xpad - don't depend on endpoint order
	Input: xpad - fix stuck mode button on Xbox One S pad
	Input: xpad - restore LED state after device resume
	Input: xpad - support some quirky Xbox One pads
	Input: xpad - sort supported devices by USB ID
	Input: xpad - sync supported devices with xboxdrv
	Input: xpad - add USB IDs for Mad Catz Brawlstick and Razer Sabertooth
	Input: xpad - sync supported devices with 360Controller
	Input: xpad - sync supported devices with XBCD
	Input: xpad - constify usb_device_id
	Input: xpad - fix PowerA init quirk for some gamepad models
	Input: xpad - validate USB endpoint type during probe
	Input: xpad - add support for PDP Xbox One controllers
	Input: xpad - add PDP device id 0x02a4
	Input: xpad - fix some coding style issues
	Input: xpad - avoid using __set_bit() for capabilities
	Input: xpad - add GPD Win 2 Controller USB IDs
	Input: xpad - fix GPD Win 2 controller name
	Input: xpad - add support for Xbox1 PDP Camo series gamepad
	cw1200: Don't leak memory if krealloc fails
	mwifiex: Fix NULL pointer dereference in skb_dequeue()
	mwifiex: fix p2p device doesn't find in scan problem
	netfilter: nf_tables: fix oops when inserting an element into a verdict map
	scsi: ufs: fix bugs related to null pointer access and array size
	scsi: ufshcd: Fix race between clk scaling and ungate work
	scsi: ufs: fix race between clock gating and devfreq scaling work
	scsi: ufshcd: release resources if probe fails
	scsi: qla2xxx: do not queue commands when unloading
	iwlwifi: mvm: fix regulatory domain update when the firmware starts
	tty: wipe buffer.
	tty: wipe buffer if not echoing data
	usb: xhci: fix uninitialized completion when USB3 port got wrong status
	btrfs: Ensure btrfs_trim_fs can trim the whole filesystem
	sched/core: Allow __sched_setscheduler() in interrupts when PI is not used
	namei: allow restricted O_CREAT of FIFOs and regular files
	s390/mm: Check for valid vma before zapping in gmap_discard
	drm/ast: Remove existing framebuffers before loading driver
	Linux 4.4.166

Change-Id: Iba8f0b45bc490f291b504ebb12590b2b01d4f075
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 4c4bedf5b3
Greg Kroah-Hartman, 2018-12-01 10:03:54 +01:00
55 changed files with 1121 additions and 342 deletions


@ -34,7 +34,9 @@ Currently, these files are in /proc/sys/fs:
- overflowgid
- pipe-user-pages-hard
- pipe-user-pages-soft
- protected_fifos
- protected_hardlinks
- protected_regular
- protected_symlinks
- suid_dumpable
- super-max
@ -182,6 +184,24 @@ applied.
==============================================================
protected_fifos:
The intent of this protection is to avoid unintentional writes to
an attacker-controlled FIFO, where a program expected to create a regular
file.
When set to "0", writing to FIFOs is unrestricted.
When set to "1" don't allow O_CREAT open on FIFOs that we don't own
in world writable sticky directories, unless they are owned by the
owner of the directory.
When set to "2" it also applies to group writable sticky directories.
This protection is based on the restrictions in Openwall.
==============================================================
protected_hardlinks:
A long-standing class of security issues is the hardlink-based
@ -202,6 +222,22 @@ This protection is based on the restrictions in Openwall and grsecurity.
==============================================================
protected_regular:
This protection is similar to protected_fifos, but it
avoids writes to an attacker-controlled regular file, where a program
expected to create one.
When set to "0", writing to regular files is unrestricted.
When set to "1" don't allow O_CREAT open on regular files that we
don't own in world writable sticky directories, unless they are
owned by the owner of the directory.
When set to "2" it also applies to group writable sticky directories.
==============================================================
protected_symlinks:
A long-standing class of security issues is the symlink-based
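
The new protected_fifos and protected_regular knobs are easiest to see from the failing case they block. A minimal userspace sketch (an illustration added for this changelog, not part of the patch; the FIFO path and ownership are hypothetical): with protected_fifos set to 1 or 2, the O_CREAT open below is expected to fail with EACCES rather than write into an existing FIFO planted by another user, and protected_regular behaves the same way for regular files.

/*
 * Illustrative only: assumes /tmp is a world-writable sticky directory and
 * that /tmp/victim.fifo was pre-created by another user who is not the
 * directory owner.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	int fd = open("/tmp/victim.fifo", O_CREAT | O_WRONLY, 0600);

	if (fd < 0)
		printf("open rejected: %s\n", strerror(errno));	/* EACCES when protected */
	else
		printf("open succeeded, fd=%d\n", fd);
	return 0;
}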


@ -10200,6 +10200,7 @@ F: arch/alpha/kernel/srm_env.c
STABLE BRANCH
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: Sasha Levin <sashal@kernel.org>
L: stable@vger.kernel.org
S: Supported
F: Documentation/stable_kernel_rules.txt


@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 165
SUBLEVEL = 166
EXTRAVERSION =
NAME = Blurry Fish Butt


@ -10,7 +10,7 @@
#
# Copyright (C) 1995-2001 by Russell King
LDFLAGS_vmlinux :=-p --no-undefined -X
LDFLAGS_vmlinux :=--no-undefined -X
CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
GZFLAGS :=-9


@ -5,8 +5,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace
/*
* Tracepoint for guest mode entry.
@ -119,4 +117,10 @@ TRACE_EVENT(kvm_check_requests,
#endif /* _TRACE_KVM_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>


@ -5,8 +5,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_booke
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_booke
#define kvm_trace_symbol_exit \
{0, "CRITICAL"}, \
@ -217,4 +215,11 @@ TRACE_EVENT(kvm_booke_queue_irqprio,
#endif
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_booke
#include <trace/define_trace.h>


@ -8,8 +8,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_hv
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_hv
#define kvm_trace_symbol_hcall \
{H_REMOVE, "H_REMOVE"}, \
@ -474,4 +472,11 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
#endif /* _TRACE_KVM_HV_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_hv
#include <trace/define_trace.h>


@ -7,8 +7,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_pr
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_pr
TRACE_EVENT(kvm_book3s_reenter,
TP_PROTO(int r, struct kvm_vcpu *vcpu),
@ -271,4 +269,11 @@ TRACE_EVENT(kvm_unmap_hva,
#endif /* _TRACE_KVM_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_pr
#include <trace/define_trace.h>


@ -1319,7 +1319,7 @@ static long vphn_get_associativity(unsigned long cpu,
switch (rc) {
case H_FUNCTION:
printk(KERN_INFO
printk_once(KERN_INFO
"VPHN is not supported. Disabling polling...\n");
stop_topology_update();
break;


@ -637,6 +637,8 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
vmaddr |= gaddr & ~PMD_MASK;
/* Find vma in the parent mm */
vma = find_vma(gmap->mm, vmaddr);
if (!vma)
continue;
size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
zap_page_range(vma, vmaddr, size, NULL);
}


@ -889,7 +889,7 @@ ENTRY(\sym)
call \do_sym
jmp error_exit /* %ebx: no swapgs flag */
jmp error_exit
.endif
END(\sym)
.endm
@ -1151,7 +1151,6 @@ END(paranoid_exit)
/*
* Save all registers in pt_regs, and switch gs if needed.
* Return: EBX=0: came from user mode; EBX=1: otherwise
*/
ENTRY(error_entry)
cld
@ -1164,7 +1163,6 @@ ENTRY(error_entry)
* the kernel CR3 here.
*/
SWITCH_KERNEL_CR3
xorl %ebx, %ebx
testb $3, CS+8(%rsp)
jz .Lerror_kernelspace
@ -1198,7 +1196,6 @@ ENTRY(error_entry)
* for these here too.
*/
.Lerror_kernelspace:
incl %ebx
leaq native_irq_return_iret(%rip), %rcx
cmpq %rcx, RIP+8(%rsp)
je .Lerror_bad_iret
@ -1229,28 +1226,19 @@ ENTRY(error_entry)
/*
* Pretend that the exception came from user mode: set up pt_regs
* as if we faulted immediately after IRET and clear EBX so that
* error_exit knows that we will be returning to user mode.
* as if we faulted immediately after IRET.
*/
mov %rsp, %rdi
call fixup_bad_iret
mov %rax, %rsp
decl %ebx
jmp .Lerror_entry_from_usermode_after_swapgs
END(error_entry)
/*
* On entry, EBS is a "return to kernel mode" flag:
* 1: already in kernel mode, don't need SWAPGS
* 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
*/
ENTRY(error_exit)
movl %ebx, %eax
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
testl %eax, %eax
jnz retint_kernel
testb $3, CS(%rsp)
jz retint_kernel
jmp retint_user
END(error_exit)


@ -130,8 +130,13 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
/* Ensure the arm clock divider is what we expect */
ret = clk_set_rate(arm_clk, new_freq * 1000);
if (ret) {
int ret1;
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
regulator_set_voltage_tol(arm_reg, volt_old, 0);
ret1 = regulator_set_voltage_tol(arm_reg, volt_old, 0);
if (ret1)
dev_warn(cpu_dev,
"failed to restore vddarm voltage: %d\n", ret1);
return ret;
}


@ -60,8 +60,29 @@ static const struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
{
struct apertures_struct *ap;
bool primary = false;
ap = alloc_apertures(1);
if (!ap)
return;
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
remove_conflicting_framebuffers(ap, "astdrmfb", primary);
kfree(ap);
}
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
ast_kick_out_firmware_fb(pdev);
return drm_get_pci_dev(pdev, ent, &driver);
}


@ -552,6 +552,7 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
}
ast_bo_unreserve(bo);
ast_set_offset_reg(crtc);
ast_set_start_address_crt1(crtc, (u32)gpu_addr);
return 0;
@ -1249,7 +1250,7 @@ static int ast_cursor_move(struct drm_crtc *crtc,
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
/* dummy write to fire HWC */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00);
ast_show_cursor(crtc);
return 0;
}

(File diff suppressed because it is too large.)


@ -423,6 +423,34 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
}
EXPORT_SYMBOL_GPL(can_put_echo_skb);
struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
{
struct can_priv *priv = netdev_priv(dev);
struct sk_buff *skb = priv->echo_skb[idx];
struct canfd_frame *cf;
if (idx >= priv->echo_skb_max) {
netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
__func__, idx, priv->echo_skb_max);
return NULL;
}
if (!skb) {
netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
__func__, idx);
return NULL;
}
/* Using "struct canfd_frame::len" for the frame
* length is supported on both CAN and CANFD frames.
*/
cf = (struct canfd_frame *)skb->data;
*len_ptr = cf->len;
priv->echo_skb[idx] = NULL;
return skb;
}
/*
* Get the skb from the stack and loop it back locally
*
@ -432,22 +460,16 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
*/
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
{
struct can_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
u8 len;
BUG_ON(idx >= priv->echo_skb_max);
skb = __can_get_echo_skb(dev, idx, &len);
if (!skb)
return 0;
if (priv->echo_skb[idx]) {
struct sk_buff *skb = priv->echo_skb[idx];
struct can_frame *cf = (struct can_frame *)skb->data;
u8 dlc = cf->can_dlc;
netif_rx(skb);
netif_rx(priv->echo_skb[idx]);
priv->echo_skb[idx] = NULL;
return dlc;
}
return 0;
return len;
}
EXPORT_SYMBOL_GPL(can_get_echo_skb);
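
For context, a minimal sketch of how a driver pairs these helpers (an illustration, not taken from this patch; the example_* functions are hypothetical): the skb is parked with can_put_echo_skb() at transmit time and looped back with can_get_echo_skb() on TX completion, which after this change returns 0 instead of crashing when the slot is out of bounds or already empty.

#include <linux/can/dev.h>
#include <linux/netdevice.h>

/* Hypothetical TX path: park the skb in echo slot 0 before handing it to
 * the controller. */
static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	can_put_echo_skb(skb, dev, 0);
	/* ... queue the frame to the hardware here ... */
	return NETDEV_TX_OK;
}

/* Hypothetical TX-complete handler: loop the parked skb back to the stack
 * and account the echoed length. */
static void example_tx_done(struct net_device *dev)
{
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += can_get_echo_skb(dev, 0);
}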


@ -491,7 +491,7 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
if (!compat)
return -ENOMEM;
priv->mdio_dn = of_find_compatible_node(dn, NULL, compat);
priv->mdio_dn = of_get_compatible_child(dn, compat);
kfree(compat);
if (!priv->mdio_dn) {
dev_err(kdev, "unable to find MDIO bus node\n");


@ -4470,7 +4470,9 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
}
ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
spin_lock_bh(&ar->data_lock);
list_add(&arvif->list, &ar->arvifs);
spin_unlock_bh(&ar->data_lock);
/* It makes no sense to have firmware do keepalives. mac80211 already
* takes care of this with idle connection polling.
@ -4603,7 +4605,9 @@ err_peer_delete:
err_vdev_delete:
ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
ar->free_vdev_map |= 1LL << arvif->vdev_id;
spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
spin_unlock_bh(&ar->data_lock);
err:
if (arvif->beacon_buf) {
@ -4647,7 +4651,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
ar->free_vdev_map |= 1LL << arvif->vdev_id;
spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
spin_unlock_bh(&ar->data_lock);
if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {


@ -1805,16 +1805,18 @@ static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size)
{
size_t pos = buf->data - buf->begin;
size_t size = pos + extra_size;
u8 *tmp;
size = round_up(size, FWLOAD_BLOCK_SIZE);
buf->begin = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA);
if (buf->begin) {
buf->data = &buf->begin[pos];
buf->end = &buf->begin[size];
return 0;
} else {
buf->end = buf->data = buf->begin;
tmp = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA);
if (!tmp) {
wsm_buf_deinit(buf);
return -ENOMEM;
}
buf->begin = tmp;
buf->data = &buf->begin[pos];
buf->end = &buf->begin[size];
return 0;
}


@ -322,8 +322,12 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
goto out;
}
if (changed)
*changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
if (changed) {
u32 status = le32_to_cpu(resp->status);
*changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
status == MCC_RESP_ILLEGAL);
}
regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
__le32_to_cpu(resp->n_channels),
@ -4050,10 +4054,6 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return;
/* if beacon filtering isn't on mac80211 does it anyway */
if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
return;


@ -667,9 +667,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
n_channels = __le32_to_cpu(mcc_resp->n_channels);
IWL_DEBUG_LAR(mvm,
"MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n",
status, mcc, mcc >> 8, mcc & 0xff,
!!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels);
"MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n",
status, mcc, mcc >> 8, mcc & 0xff, n_channels);
resp_len = sizeof(*mcc_resp) + n_channels * sizeof(__le32);
resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);


@ -1150,6 +1150,12 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
priv->adapter->curr_iface_comb.p2p_intf--;
priv->adapter->curr_iface_comb.sta_intf++;
dev->ieee80211_ptr->iftype = type;
if (mwifiex_deinit_priv_params(priv))
return -1;
if (mwifiex_init_new_priv_params(priv, dev, type))
return -1;
if (mwifiex_sta_init_cmd(priv, false, false))
return -1;
break;
case NL80211_IFTYPE_ADHOC:
if (mwifiex_cfg80211_deinit_p2p(priv))
@ -2839,8 +2845,10 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) {
skb_unlink(skb, &priv->bypass_txq);
mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
}
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);


@ -501,8 +501,10 @@ mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
struct sk_buff *skb, *tmp;
skb_queue_walk_safe(&ra_list->skb_head, skb, tmp)
skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
skb_unlink(skb, &ra_list->skb_head);
mwifiex_write_data_complete(adapter, skb, 0, -1);
}
}
/*
@ -598,11 +600,15 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
skb_unlink(skb, &priv->tdls_txq);
mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
}
skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) {
skb_unlink(skb, &priv->bypass_txq);
mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
}
atomic_set(&priv->adapter->bypass_tx_pending, 0);
idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);


@ -73,10 +73,9 @@ static int nfcmrvl_uart_parse_dt(struct device_node *node,
struct device_node *matched_node;
int ret;
matched_node = of_find_compatible_node(node, NULL, "marvell,nfc-uart");
matched_node = of_get_compatible_child(node, "marvell,nfc-uart");
if (!matched_node) {
matched_node = of_find_compatible_node(node, NULL,
"mrvl,nfc-uart");
matched_node = of_get_compatible_child(node, "mrvl,nfc-uart");
if (!matched_node)
return -ENODEV;
}


@ -712,6 +712,31 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
}
EXPORT_SYMBOL(of_get_next_available_child);
/**
* of_get_compatible_child - Find compatible child node
* @parent: parent node
* @compatible: compatible string
*
* Lookup child node whose compatible property contains the given compatible
* string.
*
* Returns a node pointer with refcount incremented, use of_node_put() on it
* when done; or NULL if not found.
*/
struct device_node *of_get_compatible_child(const struct device_node *parent,
const char *compatible)
{
struct device_node *child;
for_each_child_of_node(parent, child) {
if (of_device_is_compatible(child, compatible))
break;
}
return child;
}
EXPORT_SYMBOL(of_get_compatible_child);
/**
* of_get_child_by_name - Find the child node by name for a given parent
* @node: parent node
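
A minimal caller sketch for the new helper (illustrative; the parent node and "vendor,example-subnode" compatible string are made up), showing the refcount contract spelled out in the kerneldoc above:

#include <linux/of.h>

static int example_find_subnode(struct device_node *parent)
{
	struct device_node *child;

	child = of_get_compatible_child(parent, "vendor,example-subnode");
	if (!child)
		return -ENODEV;

	/* ... read properties from the child here ... */

	of_node_put(child);	/* balance the reference taken by the lookup */
	return 0;
}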


@ -685,6 +685,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
srb_t *sp;
int rval;
if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure) {
ql_dbg(ql_dbg_aer, vha, 0x9010,


@ -45,6 +45,7 @@
#define QUERY_DESC_MIN_SIZE 2
#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
(sizeof(struct utp_upiu_header)))
#define RESPONSE_UPIU_SENSE_DATA_LENGTH 18
#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
@ -383,7 +384,7 @@ struct utp_cmd_rsp {
__be32 residual_transfer_count;
__be32 reserved[4];
__be16 sense_data_len;
u8 sense_data[18];
u8 sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH];
};
/**


@ -104,6 +104,7 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
ufshcd_remove(hba);
ufshcd_dealloc_host(hba);
}
/**
@ -147,6 +148,7 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = ufshcd_init(hba, mmio_base, pdev->irq);
if (err) {
dev_err(&pdev->dev, "Initialization failed\n");
ufshcd_dealloc_host(hba);
return err;
}


@ -161,7 +161,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
if (ret) {
dev_err(dev, "%s: unable to find %s err %d\n",
__func__, prop_name, ret);
goto out_free;
goto out;
}
vreg->min_uA = 0;
@ -183,9 +183,6 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
goto out;
out_free:
devm_kfree(dev, vreg);
vreg = NULL;
out:
if (!ret)
*out_vreg = vreg;


@ -586,6 +586,21 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
start:
switch (hba->clk_gating.state) {
case CLKS_ON:
/*
* Wait for the ungate work to complete if in progress.
* Though the clocks may be in ON state, the link could
* still be in hibner8 state if hibern8 is allowed
* during clock gating.
* Make sure we exit hibern8 state also in addition to
* clocks being ON.
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
flush_work(&hba->clk_gating.ungate_work);
spin_lock_irqsave(hba->host->host_lock, flags);
goto start;
}
break;
case REQ_CLKS_OFF:
if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
@ -814,10 +829,14 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
int len;
if (lrbp->sense_buffer &&
ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
int len_to_copy;
len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
memcpy(lrbp->sense_buffer,
lrbp->ucd_rsp_ptr->sr.sense_data,
min_t(int, len, SCSI_SENSE_BUFFERSIZE));
min_t(int, len_to_copy, SCSI_SENSE_BUFFERSIZE));
}
}
@ -5280,7 +5299,10 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
int ufshcd_system_resume(struct ufs_hba *hba)
{
if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
if (!hba)
return -EINVAL;
if (!hba->is_powered || pm_runtime_suspended(hba->dev))
/*
* Let the runtime resume take care of resuming
* if runtime suspended.
@ -5301,7 +5323,10 @@ EXPORT_SYMBOL(ufshcd_system_resume);
*/
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
if (!hba || !hba->is_powered)
if (!hba)
return -EINVAL;
if (!hba->is_powered)
return 0;
return ufshcd_suspend(hba, UFS_RUNTIME_PM);
@ -5331,10 +5356,13 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
*/
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
if (!hba || !hba->is_powered)
if (!hba)
return -EINVAL;
if (!hba->is_powered)
return 0;
else
return ufshcd_resume(hba, UFS_RUNTIME_PM);
return ufshcd_resume(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
@ -5441,8 +5469,6 @@ void ufshcd_remove(struct ufs_hba *hba)
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba);
scsi_host_put(hba->host);
ufshcd_exit_clk_gating(hba);
ufshcd_exit_latency_hist(hba);
if (ufshcd_is_clkscaling_enabled(hba))
@ -5568,15 +5594,47 @@ static int ufshcd_devfreq_target(struct device *dev,
{
int err = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
bool release_clk_hold = false;
unsigned long irq_flags;
if (!ufshcd_is_clkscaling_enabled(hba))
return -EINVAL;
spin_lock_irqsave(hba->host->host_lock, irq_flags);
if (ufshcd_eh_in_progress(hba)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
return 0;
}
if (ufshcd_is_clkgating_allowed(hba) &&
(hba->clk_gating.state != CLKS_ON)) {
if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
/* hold the vote until the scaling work is completed */
hba->clk_gating.active_reqs++;
release_clk_hold = true;
hba->clk_gating.state = CLKS_ON;
} else {
/*
* Clock gating work seems to be running in parallel
* hence skip scaling work to avoid deadlock between
* current scaling work and gating work.
*/
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
return 0;
}
}
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
if (*freq == UINT_MAX)
err = ufshcd_scale_clks(hba, true);
else if (*freq == 0)
err = ufshcd_scale_clks(hba, false);
spin_lock_irqsave(hba->host->host_lock, irq_flags);
if (release_clk_hold)
__ufshcd_release(hba);
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
return err;
}
@ -5761,7 +5819,6 @@ exit_gating:
ufshcd_exit_latency_hist(hba);
out_disable:
hba->is_irq_enabled = false;
scsi_host_put(host);
ufshcd_hba_exit(hba);
out_error:
return err;


@ -165,15 +165,29 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
return put_user(x, ptr);
}
/* If we are not echoing the data, perhaps this is a secret so erase it */
static inline void zero_buffer(struct tty_struct *tty, u8 *buffer, int size)
{
bool icanon = !!L_ICANON(tty);
bool no_echo = !L_ECHO(tty);
if (icanon && no_echo)
memset(buffer, 0x00, size);
}
static inline int tty_copy_to_user(struct tty_struct *tty,
void __user *to,
const void *from,
void *from,
unsigned long n)
{
struct n_tty_data *ldata = tty->disc_data;
int retval;
tty_audit_add_data(tty, from, n, ldata->icanon);
return copy_to_user(to, from, n);
retval = copy_to_user(to, from, n);
if (!retval)
zero_buffer(tty, from, n);
return retval;
}
/**
@ -2005,6 +2019,7 @@ static int copy_from_read_buf(struct tty_struct *tty,
is_eof = n == 1 && read_buf(ldata, tail) == EOF_CHAR(tty);
tty_audit_add_data(tty, read_buf_addr(ldata, tail), n,
ldata->icanon);
zero_buffer(tty, read_buf_addr(ldata, tail), n);
smp_store_release(&ldata->read_tail, ldata->read_tail + n);
/* Turn single EOF into zero-length read */
if (L_EXTPROC(tty) && ldata->icanon && is_eof &&


@ -454,6 +454,8 @@ receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count)
if (count && disc->ops->receive_buf)
disc->ops->receive_buf(tty, p, f, count);
}
if (count > 0)
memset(p, 0, count);
return count;
}


@ -2757,7 +2757,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
USB_PORT_FEAT_C_BH_PORT_RESET);
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_PORT_LINK_STATE);
usb_clear_port_feature(hub->hdev, port1,
if (udev)
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_CONNECTION);
/*


@ -744,7 +744,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
status |= USB_PORT_STAT_SUSPEND;
}
if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME &&
!DEV_SUPERSPEED_ANY(raw_port_status)) {
!DEV_SUPERSPEED_ANY(raw_port_status) && hcd->speed < HCD_USB3) {
if ((raw_port_status & PORT_RESET) ||
!(raw_port_status & PORT_PE))
return 0xffffffff;
@ -790,7 +790,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
time_left = wait_for_completion_timeout(
&bus_state->rexit_done[wIndex],
msecs_to_jiffies(
XHCI_MAX_REXIT_TIMEOUT));
XHCI_MAX_REXIT_TIMEOUT_MS));
spin_lock_irqsave(&xhci->lock, flags);
if (time_left) {
@ -804,7 +804,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
} else {
int port_status = readl(port_array[wIndex]);
xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
XHCI_MAX_REXIT_TIMEOUT,
XHCI_MAX_REXIT_TIMEOUT_MS,
port_status);
status |= USB_PORT_STAT_SUSPEND;
clear_bit(wIndex, &bus_state->rexit_ports);
@ -1298,13 +1298,16 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
__le32 __iomem **port_array;
struct xhci_bus_state *bus_state;
unsigned long flags;
u32 portsc_buf[USB_MAXCHILDREN];
bool wake_enabled;
max_ports = xhci_get_ports(hcd, &port_array);
bus_state = &xhci->bus_state[hcd_index(hcd)];
wake_enabled = hcd->self.root_hub->do_remote_wakeup;
spin_lock_irqsave(&xhci->lock, flags);
if (hcd->self.root_hub->do_remote_wakeup) {
if (wake_enabled) {
if (bus_state->resuming_ports || /* USB2 */
bus_state->port_remote_wakeup) { /* USB3 */
spin_unlock_irqrestore(&xhci->lock, flags);
@ -1312,26 +1315,36 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
return -EBUSY;
}
}
port_index = max_ports;
/*
* Prepare ports for suspend, but don't write anything before all ports
* are checked and we know bus suspend can proceed
*/
bus_state->bus_suspended = 0;
port_index = max_ports;
while (port_index--) {
/* suspend the port if the port is not suspended */
u32 t1, t2;
int slot_id;
t1 = readl(port_array[port_index]);
t2 = xhci_port_state_to_neutral(t1);
portsc_buf[port_index] = 0;
if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) {
xhci_dbg(xhci, "port %d not suspended\n", port_index);
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
port_index + 1);
if (slot_id) {
/* Bail out if a USB3 port has a new device in link training */
if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
bus_state->bus_suspended = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
return -EBUSY;
}
/* suspend ports in U0, or bail out for new connect changes */
if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
if ((t1 & PORT_CSC) && wake_enabled) {
bus_state->bus_suspended = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_stop_device(xhci, slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
xhci_dbg(xhci, "Bus suspend bailout, port connect change\n");
return -EBUSY;
}
xhci_dbg(xhci, "port %d not suspended\n", port_index);
t2 &= ~PORT_PLS_MASK;
t2 |= PORT_LINK_STROBE | XDEV_U3;
set_bit(port_index, &bus_state->bus_suspended);
@ -1340,7 +1353,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
* including the USB 3.0 roothub, but only if CONFIG_PM
* is enabled, so also enable remote wake here.
*/
if (hcd->self.root_hub->do_remote_wakeup) {
if (wake_enabled) {
if (t1 & PORT_CONNECT) {
t2 |= PORT_WKOC_E | PORT_WKDISC_E;
t2 &= ~PORT_WKCONN_E;
@ -1353,7 +1366,26 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
t1 = xhci_port_state_to_neutral(t1);
if (t1 != t2)
writel(t2, port_array[port_index]);
portsc_buf[port_index] = t2;
}
/* write port settings, stopping and suspending ports if needed */
port_index = max_ports;
while (port_index--) {
if (!portsc_buf[port_index])
continue;
if (test_bit(port_index, &bus_state->bus_suspended)) {
int slot_id;
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
port_index + 1);
if (slot_id) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_stop_device(xhci, slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
}
}
writel(portsc_buf[port_index], port_array[port_index]);
}
hcd->state = HC_STATE_SUSPENDED;
bus_state->next_statechange = jiffies + msecs_to_jiffies(10);


@ -1673,7 +1673,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
* RExit to a disconnect state). If so, let the the driver know it's
* out of the RExit state.
*/
if (!DEV_SUPERSPEED_ANY(temp) &&
if (!DEV_SUPERSPEED_ANY(temp) && hcd->speed < HCD_USB3 &&
test_and_clear_bit(faked_port_index,
&bus_state->rexit_ports)) {
complete(&bus_state->rexit_done[faked_port_index]);


@ -1490,7 +1490,7 @@ struct xhci_bus_state {
* It can take up to 20 ms to transition from RExit to U0 on the
* Intel Lynx Point LP xHCI host.
*/
#define XHCI_MAX_REXIT_TIMEOUT (20 * 1000)
#define XHCI_MAX_REXIT_TIMEOUT_MS 20
static inline unsigned int hcd_index(struct usb_hcd *hcd)
{


@ -76,15 +76,6 @@ static inline int dt_type(struct p9_wstat *mistat)
return rettype;
}
static void p9stat_init(struct p9_wstat *stbuf)
{
stbuf->name = NULL;
stbuf->uid = NULL;
stbuf->gid = NULL;
stbuf->muid = NULL;
stbuf->extension = NULL;
}
/**
* v9fs_alloc_rdir_buf - Allocate buffer used for read and readdir
* @filp: opened file structure
@ -145,12 +136,10 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
rdir->tail = n;
}
while (rdir->head < rdir->tail) {
p9stat_init(&st);
err = p9stat_read(fid->clnt, rdir->buf + rdir->head,
rdir->tail - rdir->head, &st);
if (err) {
p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
p9stat_free(&st);
return -EIO;
}
reclen = st.size+2;


@ -350,7 +350,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
s->s_magic = BFS_MAGIC;
if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) {
if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end) ||
le32_to_cpu(bfs_sb->s_start) < BFS_BSIZE) {
printf("Superblock is corrupted\n");
goto out1;
}
@ -359,9 +360,11 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
sizeof(struct bfs_inode)
+ BFS_ROOT_INO - 1;
imap_len = (info->si_lasti / 8) + 1;
info->si_imap = kzalloc(imap_len, GFP_KERNEL);
if (!info->si_imap)
info->si_imap = kzalloc(imap_len, GFP_KERNEL | __GFP_NOWARN);
if (!info->si_imap) {
printf("Cannot allocate %u bytes\n", imap_len);
goto out1;
}
for (i = 0; i < BFS_ROOT_INO; i++)
set_bit(i, info->si_imap);


@ -10708,17 +10708,9 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
u64 start;
u64 end;
u64 trimmed = 0;
u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
int ret = 0;
/*
* try to trim all FS space, our block group may start from non-zero.
*/
if (range->len == total_bytes)
cache = btrfs_lookup_first_block_group(fs_info, range->start);
else
cache = btrfs_lookup_block_group(fs_info, range->start);
cache = btrfs_lookup_first_block_group(fs_info, range->start);
while (cache) {
if (cache->key.objectid >= (range->start + range->len)) {
btrfs_put_block_group(cache);


@ -378,7 +378,6 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
struct fstrim_range range;
u64 minlen = ULLONG_MAX;
u64 num_devices = 0;
u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
int ret;
if (!capable(CAP_SYS_ADMIN))
@ -402,11 +401,15 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
return -EOPNOTSUPP;
if (copy_from_user(&range, arg, sizeof(range)))
return -EFAULT;
if (range.start > total_bytes ||
range.len < fs_info->sb->s_blocksize)
/*
* NOTE: Don't truncate the range using super->total_bytes. Bytenr of
* block group is in the logical address space, which can be any
* sectorsize aligned bytenr in the range [0, U64_MAX].
*/
if (range.len < fs_info->sb->s_blocksize)
return -EINVAL;
range.len = min(range.len, total_bytes - range.start);
range.minlen = max(range.minlen, minlen);
ret = btrfs_trim_fs(fs_info->tree_root, &range);
if (ret < 0)


@ -71,13 +71,13 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
if (!sdp)
return NULL;
sb->s_fs_info = sdp;
sdp->sd_vfs = sb;
sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
if (!sdp->sd_lkstats) {
kfree(sdp);
return NULL;
}
sb->s_fs_info = sdp;
set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
gfs2_tune_init(&sdp->sd_tune);


@ -884,6 +884,8 @@ static inline void put_link(struct nameidata *nd)
int sysctl_protected_symlinks __read_mostly = 0;
int sysctl_protected_hardlinks __read_mostly = 0;
int sysctl_protected_fifos __read_mostly;
int sysctl_protected_regular __read_mostly;
/**
* may_follow_link - Check symlink following for unsafe situations
@ -997,6 +999,45 @@ static int may_linkat(struct path *link)
return -EPERM;
}
/**
* may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
* should be allowed, or not, on files that already
* exist.
* @dir: the sticky parent directory
* @inode: the inode of the file to open
*
* Block an O_CREAT open of a FIFO (or a regular file) when:
* - sysctl_protected_fifos (or sysctl_protected_regular) is enabled
* - the file already exists
* - we are in a sticky directory
* - we don't own the file
* - the owner of the directory doesn't own the file
* - the directory is world writable
* If the sysctl_protected_fifos (or sysctl_protected_regular) is set to 2
* the directory doesn't have to be world writable: being group writable will
* be enough.
*
* Returns 0 if the open is allowed, -ve on error.
*/
static int may_create_in_sticky(struct dentry * const dir,
struct inode * const inode)
{
if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
(!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
likely(!(dir->d_inode->i_mode & S_ISVTX)) ||
uid_eq(inode->i_uid, dir->d_inode->i_uid) ||
uid_eq(current_fsuid(), inode->i_uid))
return 0;
if (likely(dir->d_inode->i_mode & 0002) ||
(dir->d_inode->i_mode & 0020 &&
((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
(sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
return -EACCES;
}
return 0;
}
static __always_inline
const char *get_link(struct nameidata *nd)
{
@ -3198,9 +3239,15 @@ finish_open:
error = -ELOOP;
goto out;
}
error = -EISDIR;
if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
goto out;
if (open_flag & O_CREAT) {
error = -EISDIR;
if (d_is_dir(nd->path.dentry))
goto out;
error = may_create_in_sticky(dir,
d_backing_inode(nd->path.dentry));
if (unlikely(error))
goto out;
}
error = -ENOTDIR;
if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
goto out;
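
A rough admin-side sketch of switching the corresponding sysctls on at runtime (an illustration, not part of the patch; must run as root since the proc entries are created mode 0600, and "2" extends the restriction to group-writable sticky directories):

#include <stdio.h>

static int set_protection(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	int err = 0;

	err |= set_protection("/proc/sys/fs/protected_fifos", "2");
	err |= set_protection("/proc/sys/fs/protected_regular", "2");
	return err ? 1 : 0;
}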


@ -154,6 +154,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
unsigned int idx);
struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr);
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
void can_free_echo_skb(struct net_device *dev, unsigned int idx);


@ -68,6 +68,8 @@ extern struct inodes_stat_t inodes_stat;
extern int leases_enable, lease_break_time;
extern int sysctl_protected_symlinks;
extern int sysctl_protected_hardlinks;
extern int sysctl_protected_fifos;
extern int sysctl_protected_regular;
struct buffer_head;
typedef int (get_block_t)(struct inode *inode, sector_t iblock,


@ -265,6 +265,8 @@ extern struct device_node *of_get_next_child(const struct device_node *node,
extern struct device_node *of_get_next_available_child(
const struct device_node *node, struct device_node *prev);
extern struct device_node *of_get_compatible_child(const struct device_node *parent,
const char *compatible);
extern struct device_node *of_get_child_by_name(const struct device_node *node,
const char *name);
@ -462,6 +464,12 @@ static inline bool of_have_populated_dt(void)
return false;
}
static inline struct device_node *of_get_compatible_child(const struct device_node *parent,
const char *compatible)
{
return NULL;
}
static inline struct device_node *of_get_child_by_name(
const struct device_node *node,
const char *name)


@ -215,7 +215,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
int count;
int i;
int diag, dtab_count;
int key;
int key, buf_size, ret;
static int last_crlf;
diag = kdbgetintenv("DTABCOUNT", &dtab_count);
@ -343,9 +343,8 @@ poll_again:
else
p_tmp = tmpbuffer;
len = strlen(p_tmp);
count = kallsyms_symbol_complete(p_tmp,
sizeof(tmpbuffer) -
(p_tmp - tmpbuffer));
buf_size = sizeof(tmpbuffer) - (p_tmp - tmpbuffer);
count = kallsyms_symbol_complete(p_tmp, buf_size);
if (tab == 2 && count > 0) {
kdb_printf("\n%d symbols are found.", count);
if (count > dtab_count) {
@ -357,9 +356,13 @@ poll_again:
}
kdb_printf("\n");
for (i = 0; i < count; i++) {
if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
ret = kallsyms_symbol_next(p_tmp, i, buf_size);
if (WARN_ON(!ret))
break;
kdb_printf("%s ", p_tmp);
if (ret != -E2BIG)
kdb_printf("%s ", p_tmp);
else
kdb_printf("%s... ", p_tmp);
*(p_tmp + len) = '\0';
}
if (i >= dtab_count)


@ -83,7 +83,7 @@ typedef struct __ksymtab {
unsigned long sym_start;
unsigned long sym_end;
} kdb_symtab_t;
extern int kallsyms_symbol_next(char *prefix_name, int flag);
extern int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size);
extern int kallsyms_symbol_complete(char *prefix_name, int max_len);
/* Exported Symbols for kernel loadable modules to use. */


@ -221,11 +221,13 @@ int kallsyms_symbol_complete(char *prefix_name, int max_len)
* Parameters:
* prefix_name prefix of a symbol name to lookup
* flag 0 means search from the head, 1 means continue search.
* buf_size maximum length that can be written to prefix_name
* buffer
* Returns:
* 1 if a symbol matches the given prefix.
* 0 if no string found
*/
int kallsyms_symbol_next(char *prefix_name, int flag)
int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size)
{
int prefix_len = strlen(prefix_name);
static loff_t pos;
@ -235,10 +237,8 @@ int kallsyms_symbol_next(char *prefix_name, int flag)
pos = 0;
while ((name = kdb_walk_kallsyms(&pos))) {
if (strncmp(name, prefix_name, prefix_len) == 0) {
strncpy(prefix_name, name, strlen(name)+1);
return 1;
}
if (!strncmp(name, prefix_name, prefix_len))
return strscpy(prefix_name, name, buf_size);
}
return 0;
}


@ -3944,8 +3944,8 @@ static int __sched_setscheduler(struct task_struct *p,
struct rq *rq;
int reset_on_fork;
/* may grab non-irq protected spin_locks */
BUG_ON(in_interrupt());
/* The pi code expects interrupts enabled */
BUG_ON(pi && in_interrupt());
recheck:
/* double check policy once rq lock held */
if (policy < 0) {


@ -1805,6 +1805,24 @@ static struct ctl_table fs_table[] = {
.extra1 = &zero,
.extra2 = &one,
},
{
.procname = "protected_fifos",
.data = &sysctl_protected_fifos,
.maxlen = sizeof(int),
.mode = 0600,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &two,
},
{
.procname = "protected_regular",
.data = &sysctl_protected_regular,
.maxlen = sizeof(int),
.mode = 0600,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &two,
},
{
.procname = "suid_dumpable",
.data = &suid_dumpable,


@ -1818,9 +1818,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
mutex_lock(&inode->i_mutex);
/* We're holding i_mutex so we can access i_size directly */
if (offset < 0)
offset = -EINVAL;
else if (offset >= inode->i_size)
if (offset < 0 || offset >= inode->i_size)
offset = -ENXIO;
else {
start = offset >> PAGE_CACHE_SHIFT;
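
For reference, a small userspace sketch of the behaviour this hunk changes (illustrative only; the fd is assumed to refer to a file on tmpfs): a negative offset passed to SEEK_DATA or SEEK_HOLE now fails with ENXIO, matching other filesystems, where it previously returned EINVAL.

#define _GNU_SOURCE		/* for SEEK_DATA / SEEK_HOLE */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int check_negative_seek(int fd)
{
	off_t pos = lseek(fd, -1, SEEK_DATA);

	if (pos == (off_t)-1)
		printf("lseek(fd, -1, SEEK_DATA): %s\n", strerror(errno));	/* expect ENXIO */

	return pos == (off_t)-1 ? -1 : 0;
}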


@ -726,7 +726,6 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
struct sk_buff *skb = NULL;
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
unsigned long cpu_flags;
size_t copied = 0;
u32 peek_seq = 0;
u32 *seq, skb_len;
@ -851,9 +850,8 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
goto copy_uaddr;
if (!(flags & MSG_PEEK)) {
spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
sk_eat_skb(sk, skb);
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
skb_unlink(skb, &sk->sk_receive_queue);
kfree_skb(skb);
*seq = 0;
}
@ -874,9 +872,8 @@ copy_uaddr:
llc_cmsg_rcv(msg, skb);
if (!(flags & MSG_PEEK)) {
spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
sk_eat_skb(sk, skb);
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
skb_unlink(skb, &sk->sk_receive_queue);
kfree_skb(skb);
*seq = 0;
}


@ -3452,6 +3452,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
dreg = nft_type_to_reg(set->dtype);
list_for_each_entry(binding, &set->bindings, list) {
struct nft_ctx bind_ctx = {
.net = ctx->net,
.afi = ctx->afi,
.table = ctx->table,
.chain = (struct nft_chain *)binding->chain,


@ -486,8 +486,9 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
void sctp_assoc_rm_peer(struct sctp_association *asoc,
struct sctp_transport *peer)
{
struct list_head *pos;
struct sctp_transport *transport;
struct sctp_transport *transport;
struct list_head *pos;
struct sctp_chunk *ch;
pr_debug("%s: association:%p addr:%pISpc\n",
__func__, asoc, &peer->ipaddr.sa);
@ -543,7 +544,6 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
*/
if (!list_empty(&peer->transmitted)) {
struct sctp_transport *active = asoc->peer.active_path;
struct sctp_chunk *ch;
/* Reset the transport of each chunk on this list */
list_for_each_entry(ch, &peer->transmitted,
@ -565,6 +565,10 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
sctp_transport_hold(active);
}
list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
if (ch->transport == peer)
ch->transport = NULL;
asoc->peer.transport_count--;
sctp_transport_free(peer);


@ -272,13 +272,7 @@ static bool generic_key_to_expire(struct rpc_cred *cred)
{
struct auth_cred *acred = &container_of(cred, struct generic_cred,
gc_base)->acred;
bool ret;
get_rpccred(cred);
ret = test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
put_rpccred(cred);
return ret;
return test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
}
static const struct rpc_credops generic_credops = {