This is the 4.4.99 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAloQBz0ACgkQONu9yGCS
aT5nQxAAs/xWKpYLSLvLPYnTOmSmNJ36isgFriVT+wWOLzkrWTWuoQnluDjMQjie
nH6whZMlOnG+k5GrGF3XymxZ66tDj9TlXnPAHCC8ikcqir2/dBO/gO5v2gmFgF2E
j52mt09I3acBQJEt+Rz3xJCMa5so61uDYGtqk/URcPEW1nBa1rfA1QIy/9zv2/aw
2yPSz4NQlv+7yvjguw4Ik5Yt/hGeu1Y8Kuc4bVHG2TB+y0QYwri42bBwQV7llili
XqwfjFJYGMqWJqHGF/p0hD+/Xylw6GnDzxDZQDMNhsuWcfe3tUhuOVkX30E96fh2
ipT4wI5DTmql8EN/r/P7VS2BKL4W5HEMeNEd2APkGNGnSrzKGbd0CQ+cWVZbr645
R03AbqZjhaQKwRi+n82q1mMb4p+3Z/F/T8twHmYg/DLta3kzzRdfVJPNFBFIoSnF
Bay8KJKqoMv2Bjhla78pHMoqSQ9j/fJc2iPIAABtFlsTjic+/STiS7ANAsmDdJtt
8XXc6mFQfbulKKlKKqudPLjOpUNu1SrsOcc9gmovbTy7dN6FBOfJwFMCYonNyXAc
6/ACSxYJlnZ9YEacEmcXmz0GTytyKiTYE3fNsXc/8fHnRZ1+yea9Mo77wWkj7K4V
IqNIJMCW8K+P97oL6mdUBZwMUi4zrWueakMq8SWBdKYaD5yeV2k=
=ql4j
-----END PGP SIGNATURE-----

Merge 4.4.99 into android-4.4

Changes in 4.4.99

	mac80211: accept key reinstall without changing anything
	mac80211: use constant time comparison with keys
	mac80211: don't compare TKIP TX MIC key in reinstall prevention
	usb: usbtest: fix NULL pointer dereference
	Input: ims-psu - check if CDC union descriptor is sane
	ALSA: seq: Cancel pending autoload work at unbinding device
	tun/tap: sanitize TUNSETSNDBUF input
	tcp: fix tcp_mtu_probe() vs highest_sack
	l2tp: check ps->sock before running pppol2tp_session_ioctl()
	tun: call dev_get_valid_name() before register_netdevice()
	sctp: add the missing sock_owned_by_user check in sctp_icmp_redirect
	packet: avoid panic in packet_getsockopt()
	ipv6: flowlabel: do not leave opt->tot_len with garbage
	net/unix: don't show information about sockets from other namespaces
	ip6_gre: only increase err_count for some certain type icmpv6 in ip6gre_err
	tun: allow positive return values on dev_get_valid_name() call
	sctp: reset owner sk for data chunks on out queues when migrating a sock
	ppp: fix race in ppp device destruction
	ipip: only increase err_count for some certain type icmp in ipip_err
	tcp/dccp: fix ireq->opt races
	tcp/dccp: fix lockdep splat in inet_csk_route_req()
	tcp/dccp: fix other lockdep splats accessing ireq_opt
	security/keys: add CONFIG_KEYS_COMPAT to Kconfig
	tipc: fix link attribute propagation bug
	brcmfmac: remove setting IBSS mode when stopping AP
	target/iscsi: Fix iSCSI task reassignment handling
	target: Fix node_acl demo-mode + uncached dynamic shutdown regression
	misc: panel: properly restore atomic counter on error path
	Linux 4.4.99

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
commit 7eab308a49
41 changed files with 300 additions and 148 deletions
Makefile | 2
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 98
+SUBLEVEL = 99
 EXTRAVERSION =
 NAME = Blurry Fish Butt

@@ -1082,11 +1082,6 @@ source "arch/powerpc/Kconfig.debug"

 source "security/Kconfig"

-config KEYS_COMPAT
-	bool
-	depends on COMPAT && KEYS
-	default y
-
 source "crypto/Kconfig"

 config PPC_LIB_RHEAP

@@ -346,9 +346,6 @@ config COMPAT
 config SYSVIPC_COMPAT
 	def_bool y if COMPAT && SYSVIPC

-config KEYS_COMPAT
-	def_bool y if COMPAT && KEYS
-
 config SMP
 	def_bool y
 	prompt "Symmetric multi-processing support"

@@ -549,9 +549,6 @@ config SYSVIPC_COMPAT
 	depends on COMPAT && SYSVIPC
 	default y

-config KEYS_COMPAT
-	def_bool y if COMPAT && KEYS
-
 endmenu

 source "net/Kconfig"

@@ -2657,10 +2657,6 @@ config COMPAT_FOR_U64_ALIGNMENT
 config SYSVIPC_COMPAT
 	def_bool y
 	depends on SYSVIPC
-
-config KEYS_COMPAT
-	def_bool y
-	depends on KEYS
 endif

 endmenu

@@ -1635,13 +1635,25 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
 		return NULL;
 	}

-	while (buflen > 0) {
+	while (buflen >= sizeof(*union_desc)) {
 		union_desc = (struct usb_cdc_union_desc *)buf;

+		if (union_desc->bLength > buflen) {
+			dev_err(&intf->dev, "Too large descriptor\n");
+			return NULL;
+		}
+
 		if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
 		    union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
 			dev_dbg(&intf->dev, "Found union header\n");
-			return union_desc;
+
+			if (union_desc->bLength >= sizeof(*union_desc))
+				return union_desc;
+
+			dev_err(&intf->dev,
+				"Union descriptor to short (%d vs %zd\n)",
+				union_desc->bLength, sizeof(*union_desc));
+			return NULL;
 		}

 		buflen -= union_desc->bLength;

@@ -1117,6 +1117,8 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
 	case TUNSETSNDBUF:
 		if (get_user(s, sp))
 			return -EFAULT;
+		if (s <= 0)
+			return -EINVAL;

 		q->sk.sk_sndbuf = s;
 		return 0;

@@ -1110,7 +1110,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
 static struct lock_class_key ppp_tx_busylock;
 static int ppp_dev_init(struct net_device *dev)
 {
+	struct ppp *ppp;
+
 	dev->qdisc_tx_busylock = &ppp_tx_busylock;
+
+	ppp = netdev_priv(dev);
+	/* Let the netdevice take a reference on the ppp file. This ensures
+	 * that ppp_destroy_interface() won't run before the device gets
+	 * unregistered.
+	 */
+	atomic_inc(&ppp->file.refcnt);
+
 	return 0;
 }

@@ -1133,6 +1143,15 @@ static void ppp_dev_uninit(struct net_device *dev)
 	wake_up_interruptible(&ppp->file.rwait);
 }

+static void ppp_dev_priv_destructor(struct net_device *dev)
+{
+	struct ppp *ppp;
+
+	ppp = netdev_priv(dev);
+	if (atomic_dec_and_test(&ppp->file.refcnt))
+		ppp_destroy_interface(ppp);
+}
+
 static const struct net_device_ops ppp_netdev_ops = {
 	.ndo_init	 = ppp_dev_init,
 	.ndo_uninit      = ppp_dev_uninit,
@@ -1150,6 +1169,7 @@ static void ppp_setup(struct net_device *dev)
 	dev->tx_queue_len = 3;
 	dev->type = ARPHRD_PPP;
 	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+	dev->destructor = ppp_dev_priv_destructor;
 	netif_keep_dst(dev);
 }

@@ -1681,6 +1681,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)

 		if (!dev)
 			return -ENOMEM;
+		err = dev_get_valid_name(net, dev, name);
+		if (err < 0)
+			goto err_free_dev;

 		dev_net_set(dev, net);
 		dev->rtnl_link_ops = &tun_link_ops;
@@ -2068,6 +2071,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 			ret = -EFAULT;
 			break;
 		}
+		if (sndbuf <= 0) {
+			ret = -EINVAL;
+			break;
+		}

 		tun->sndbuf = sndbuf;
 		tun_set_sndbuf(tun);

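The TUNSETSNDBUF sanitization above (and the matching macvtap hunk earlier) rejects non-positive buffer sizes coming from userspace. The following is only a minimal userspace sketch, not part of the patch, showing the ioctl path being exercised; it assumes access to /dev/net/tun and CAP_NET_ADMIN to attach the interface. After this release the bogus value is expected to fail with EINVAL instead of being stored in sk_sndbuf.

/* Hypothetical userspace sketch: exercising TUNSETSNDBUF with a value
 * that the tun/tap fix above now rejects.  Needs CAP_NET_ADMIN. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int main(void)
{
	struct ifreq ifr;
	int sndbuf = -1;	/* bogus size the old kernel accepted */
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
	if (ioctl(fd, TUNSETIFF, &ifr) < 0)	/* attach a tun interface */
		goto out;

	if (ioctl(fd, TUNSETSNDBUF, &sndbuf) < 0)
		printf("rejected as expected: %s\n", strerror(errno));

out:
	close(fd);
	return 0;
}
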
@@ -4295,9 +4295,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
 		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
 		if (err < 0)
 			brcmf_err("setting AP mode failed %d\n", err);
-		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0);
-		if (err < 0)
-			brcmf_err("setting INFRA mode failed %d\n", err);
 		if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS))
 			brcmf_fil_iovar_int_set(ifp, "mbss", 0);
 		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,

@@ -1431,17 +1431,25 @@ static ssize_t lcd_write(struct file *file,

 static int lcd_open(struct inode *inode, struct file *file)
 {
-	if (!atomic_dec_and_test(&lcd_available))
-		return -EBUSY;	/* open only once at a time */
+	int ret;
+
+	ret = -EBUSY;
+	if (!atomic_dec_and_test(&lcd_available))
+		goto fail;	/* open only once at a time */

+	ret = -EPERM;
 	if (file->f_mode & FMODE_READ)	/* device is write-only */
-		return -EPERM;
+		goto fail;

 	if (lcd.must_clear) {
 		lcd_clear_display();
 		lcd.must_clear = false;
 	}
 	return nonseekable_open(inode, file);
+
+fail:
+	atomic_inc(&lcd_available);
+	return ret;
 }

 static int lcd_release(struct inode *inode, struct file *file)
@@ -1704,14 +1712,21 @@ static ssize_t keypad_read(struct file *file,

 static int keypad_open(struct inode *inode, struct file *file)
 {
-	if (!atomic_dec_and_test(&keypad_available))
-		return -EBUSY;	/* open only once at a time */
+	int ret;
+
+	ret = -EBUSY;
+	if (!atomic_dec_and_test(&keypad_available))
+		goto fail;	/* open only once at a time */

+	ret = -EPERM;
 	if (file->f_mode & FMODE_WRITE)	/* device is read-only */
-		return -EPERM;
+		goto fail;

 	keypad_buflen = 0;	/* flush the buffer on opening */
 	return 0;
+fail:
+	atomic_inc(&keypad_available);
+	return ret;
 }

 static int keypad_release(struct inode *inode, struct file *file)

@@ -1759,7 +1759,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	struct iscsi_tm *hdr;
 	int out_of_order_cmdsn = 0, ret;
 	bool sess_ref = false;
-	u8 function;
+	u8 function, tcm_function = TMR_UNKNOWN;

 	hdr			= (struct iscsi_tm *) buf;
 	hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
@@ -1805,10 +1805,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	 * LIO-Target $FABRIC_MOD
 	 */
 	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
-
-		u8 tcm_function;
-		int ret;
-
 		transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
 				      conn->sess->se_sess, 0, DMA_NONE,
 				      TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
@@ -1844,15 +1840,14 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 			return iscsit_add_reject_cmd(cmd,
 					ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
 		}
-
-		ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req,
-					 tcm_function, GFP_KERNEL);
-		if (ret < 0)
-			return iscsit_add_reject_cmd(cmd,
+	}
+	ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function,
+				 GFP_KERNEL);
+	if (ret < 0)
+		return iscsit_add_reject_cmd(cmd,
 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);

-		cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
-	}
+	cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;

 	cmd->iscsi_opcode	= ISCSI_OP_SCSI_TMFUNC;
 	cmd->i_state		= ISTATE_SEND_TASKMGTRSP;

@@ -350,7 +350,7 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
 	if (acl->dynamic_node_acl) {
 		acl->dynamic_node_acl = 0;
 	}
-	list_del(&acl->acl_list);
+	list_del_init(&acl->acl_list);
 	tpg->num_node_acls--;
 	mutex_unlock(&tpg->acl_node_mutex);

@@ -572,7 +572,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
 	 * in transport_deregister_session().
 	 */
 	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
-		list_del(&nacl->acl_list);
+		list_del_init(&nacl->acl_list);
 		se_tpg->num_node_acls--;

 		core_tpg_wait_for_nacl_pr_ref(nacl);

@@ -431,7 +431,7 @@ static void target_complete_nacl(struct kref *kref)
 	}

 	mutex_lock(&se_tpg->acl_node_mutex);
-	list_del(&nacl->acl_list);
+	list_del_init(&nacl->acl_list);
 	mutex_unlock(&se_tpg->acl_node_mutex);

 	core_tpg_wait_for_nacl_pr_ref(nacl);
@@ -503,7 +503,7 @@ void transport_free_session(struct se_session *se_sess)
 		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);

 		if (se_nacl->dynamic_stop)
-			list_del(&se_nacl->acl_list);
+			list_del_init(&se_nacl->acl_list);
 	}
 	mutex_unlock(&se_tpg->acl_node_mutex);

@@ -185,12 +185,13 @@ found:
 			return tmp;
 	}

-	if (in) {
+	if (in)
 		dev->in_pipe = usb_rcvbulkpipe(udev,
 			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+	if (out)
 		dev->out_pipe = usb_sndbulkpipe(udev,
 			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
-	}

 	if (iso_in) {
 		dev->iso_in = &iso_in->desc;
 		dev->in_iso_pipe = usb_rcvisocpipe(udev,

@@ -3469,6 +3469,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 				    unsigned char name_assign_type,
 				    void (*setup)(struct net_device *),
 				    unsigned int txqs, unsigned int rxqs);
+int dev_get_valid_name(struct net *net, struct net_device *dev,
+		       const char *name);
+
 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
 	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)

@@ -95,7 +95,7 @@ struct inet_request_sock {
 	kmemcheck_bitfield_end(flags);
 	u32                     ir_mark;
 	union {
-		struct ip_options_rcu	*opt;
+		struct ip_options_rcu __rcu	*ireq_opt;
 		struct sk_buff		*pktopts;
 	};
 };
@@ -113,6 +113,12 @@ static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
 	return sk->sk_mark;
 }

+static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
+{
+	return rcu_dereference_check(ireq->ireq_opt,
+				     atomic_read(&ireq->req.rsk_refcnt) > 0);
+}
+
 struct inet_cork {
 	unsigned int		flags;
 	__be32			addr;

@@ -1615,12 +1615,12 @@ static inline void tcp_highest_sack_reset(struct sock *sk)
 	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
 }

-/* Called when old skb is about to be deleted (to be combined with new skb) */
-static inline void tcp_highest_sack_combine(struct sock *sk,
+/* Called when old skb is about to be deleted and replaced by new skb */
+static inline void tcp_highest_sack_replace(struct sock *sk,
 					    struct sk_buff *old,
 					    struct sk_buff *new)
 {
-	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
+	if (old == tcp_highest_sack(sk))
 		tcp_sk(sk)->highest_sack = new;
 }

@@ -199,6 +199,7 @@ enum tcm_tmreq_table {
 	TMR_LUN_RESET		= 5,
 	TMR_TARGET_WARM_RESET	= 6,
 	TMR_TARGET_COLD_RESET	= 7,
+	TMR_UNKNOWN		= 0xff,
 };

 /* fabric independent task management response values */

@@ -1111,9 +1111,8 @@ static int dev_alloc_name_ns(struct net *net,
 	return ret;
 }

-static int dev_get_valid_name(struct net *net,
-			      struct net_device *dev,
-			      const char *name)
+int dev_get_valid_name(struct net *net, struct net_device *dev,
+		       const char *name)
 {
 	BUG_ON(!net);

@@ -1129,6 +1128,7 @@ static int dev_get_valid_name(struct net *net,

 	return 0;
 }
+EXPORT_SYMBOL(dev_get_valid_name);

 /**
  *	dev_change_name - change name of a device

@@ -414,8 +414,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
 	sk_daddr_set(newsk, ireq->ir_rmt_addr);
 	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
 	newinet->inet_saddr	= ireq->ir_loc_addr;
-	newinet->inet_opt	= ireq->opt;
-	ireq->opt	   = NULL;
+	RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
 	newinet->mc_index  = inet_iif(skb);
 	newinet->mc_ttl	   = ip_hdr(skb)->ttl;
 	newinet->inet_id   = jiffies;
@@ -430,7 +429,10 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
 	if (__inet_inherit_port(sk, newsk) < 0)
 		goto put_and_exit;
 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
-
+	if (*own_req)
+		ireq->ireq_opt = NULL;
+	else
+		newinet->inet_opt = NULL;
 	return newsk;

 exit_overflow:
@@ -441,6 +443,7 @@ exit:
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return NULL;
 put_and_exit:
+	newinet->inet_opt = NULL;
 	inet_csk_prepare_forced_close(newsk);
 	dccp_done(newsk);
 	goto exit;
@@ -492,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
 							      ireq->ir_rmt_addr);
 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 					    ireq->ir_rmt_addr,
-					    ireq->opt);
+					    ireq_opt_deref(ireq));
 		err = net_xmit_eval(err);
 	}

@@ -546,7 +549,7 @@ out:
 static void dccp_v4_reqsk_destructor(struct request_sock *req)
 {
 	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
-	kfree(inet_rsk(req)->opt);
+	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
 }

 void dccp_syn_ack_timeout(const struct request_sock *req)

@@ -2012,7 +2012,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
 	buf = NULL;

 	req_inet = inet_rsk(req);
-	opt = xchg(&req_inet->opt, opt);
+	opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
 	if (opt)
 		kfree_rcu(opt, rcu);

@@ -2034,11 +2034,13 @@ req_setattr_failure:
  * values on failure.
  *
  */
-static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
+static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
 {
+	struct ip_options_rcu *opt = rcu_dereference_protected(*opt_ptr, 1);
 	int hdr_delta = 0;
-	struct ip_options_rcu *opt = *opt_ptr;

 	if (!opt || opt->opt.cipso == 0)
 		return 0;
 	if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
 		u8 cipso_len;
 		u8 cipso_off;
@@ -2100,14 +2102,10 @@ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
  */
 void cipso_v4_sock_delattr(struct sock *sk)
 {
-	int hdr_delta;
-	struct ip_options_rcu *opt;
 	struct inet_sock *sk_inet;
+	int hdr_delta;

 	sk_inet = inet_sk(sk);
-	opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
-	if (!opt || opt->opt.cipso == 0)
-		return;

 	hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
 	if (sk_inet->is_icsk && hdr_delta > 0) {
@@ -2127,15 +2125,7 @@ void cipso_v4_sock_delattr(struct sock *sk)
  */
 void cipso_v4_req_delattr(struct request_sock *req)
 {
-	struct ip_options_rcu *opt;
-	struct inet_request_sock *req_inet;
-
-	req_inet = inet_rsk(req);
-	opt = req_inet->opt;
-	if (!opt || opt->opt.cipso == 0)
-		return;
-
-	cipso_v4_delopt(&req_inet->opt);
+	cipso_v4_delopt(&inet_rsk(req)->ireq_opt);
 }

 /**

@@ -412,9 +412,11 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct net *net = read_pnet(&ireq->ireq_net);
-	struct ip_options_rcu *opt = ireq->opt;
+	struct ip_options_rcu *opt;
 	struct rtable *rt;

+	opt = ireq_opt_deref(ireq);
+
 	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -448,10 +450,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
 	struct flowi4 *fl4;
 	struct rtable *rt;

+	opt = rcu_dereference(ireq->ireq_opt);
 	fl4 = &newinet->cork.fl.u.ip4;

-	rcu_read_lock();
-	opt = rcu_dereference(newinet->inet_opt);
 	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -464,13 +465,11 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
 		goto no_route;
 	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
 		goto route_err;
-	rcu_read_unlock();
 	return &rt->dst;

 route_err:
 	ip_rt_put(rt);
 no_route:
-	rcu_read_unlock();
 	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
 	return NULL;
 }

@@ -129,42 +129,68 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly;
 static int ipip_err(struct sk_buff *skb, u32 info)
 {
-
-/* All the routers (except for Linux) return only
-   8 bytes of packet payload. It means, that precise relaying of
-   ICMP in the real Internet is absolutely infeasible.
- */
+/* All the routers (except for Linux) return only
+   8 bytes of packet payload. It means, that precise relaying of
+   ICMP in the real Internet is absolutely infeasible.
+ */
 	struct net *net = dev_net(skb->dev);
 	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
 	const struct iphdr *iph = (const struct iphdr *)skb->data;
-	struct ip_tunnel *t;
-	int err;
 	const int type = icmp_hdr(skb)->type;
 	const int code = icmp_hdr(skb)->code;
+	struct ip_tunnel *t;
+	int err = 0;

+	switch (type) {
+	case ICMP_DEST_UNREACH:
+		switch (code) {
+		case ICMP_SR_FAILED:
+			/* Impossible event. */
+			goto out;
+		default:
+			/* All others are translated to HOST_UNREACH.
+			 * rfc2003 contains "deep thoughts" about NET_UNREACH,
+			 * I believe they are just ether pollution. --ANK
+			 */
+			break;
+		}
+		break;
+
+	case ICMP_TIME_EXCEEDED:
+		if (code != ICMP_EXC_TTL)
+			goto out;
+		break;
+
+	case ICMP_REDIRECT:
+		break;
+
+	default:
+		goto out;
+	}
+
-	err = -ENOENT;
 	t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
 			     iph->daddr, iph->saddr, 0);
-	if (!t)
+	if (!t) {
+		err = -ENOENT;
 		goto out;
+	}

 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
-		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-				 t->parms.link, 0, IPPROTO_IPIP, 0);
-		err = 0;
+		ipv4_update_pmtu(skb, net, info, t->parms.link, 0,
+				 iph->protocol, 0);
 		goto out;
 	}

 	if (type == ICMP_REDIRECT) {
-		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
-			      IPPROTO_IPIP, 0);
-		err = 0;
+		ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0);
 		goto out;
 	}

-	if (t->parms.iph.daddr == 0)
+	if (t->parms.iph.daddr == 0) {
+		err = -ENOENT;
 		goto out;
+	}

-	err = 0;
 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
 		goto out;

@@ -357,7 +357,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 	/* We throwed the options of the initial SYN away, so we hope
 	 * the ACK carries the same options again (see RFC1122 4.2.3.8)
 	 */
-	ireq->opt = tcp_v4_save_options(skb);
+	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(skb));

 	if (security_inet_conn_request(sk, skb, req)) {
 		reqsk_free(req);

@@ -6106,7 +6106,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
 		struct inet_request_sock *ireq = inet_rsk(req);

 		kmemcheck_annotate_bitfield(ireq, flags);
-		ireq->opt = NULL;
+		ireq->ireq_opt = NULL;
 		atomic64_set(&ireq->ir_cookie, 0);
 		ireq->ireq_state = TCP_NEW_SYN_RECV;
 		write_pnet(&ireq->ireq_net, sock_net(sk_listener));

@@ -850,7 +850,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,

 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 					    ireq->ir_rmt_addr,
-					    ireq->opt);
+					    ireq_opt_deref(ireq));
 		err = net_xmit_eval(err);
 	}

@@ -862,7 +862,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
  */
 static void tcp_v4_reqsk_destructor(struct request_sock *req)
 {
-	kfree(inet_rsk(req)->opt);
+	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
 }


@@ -1191,7 +1191,7 @@ static void tcp_v4_init_req(struct request_sock *req,
 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
 	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
-	ireq->opt = tcp_v4_save_options(skb);
+	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(skb));
 }

 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
@@ -1286,10 +1286,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 	ireq = inet_rsk(req);
 	sk_daddr_set(newsk, ireq->ir_rmt_addr);
 	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
-	newinet->inet_saddr	      = ireq->ir_loc_addr;
-	inet_opt	      = ireq->opt;
-	rcu_assign_pointer(newinet->inet_opt, inet_opt);
-	ireq->opt	      = NULL;
+	newinet->inet_saddr   = ireq->ir_loc_addr;
+	inet_opt	      = rcu_dereference(ireq->ireq_opt);
+	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
 	newinet->mc_index     = inet_iif(skb);
 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
 	newinet->rcv_tos      = ip_hdr(skb)->tos;
@@ -1337,9 +1336,12 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 	if (__inet_inherit_port(sk, newsk) < 0)
 		goto put_and_exit;
 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
-	if (*own_req)
+	if (likely(*own_req)) {
 		tcp_move_syn(newtp, req);
-
+		ireq->ireq_opt = NULL;
+	} else {
+		newinet->inet_opt = NULL;
+	}
 	return newsk;

 exit_overflow:
@@ -1350,6 +1352,7 @@ exit:
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return NULL;
 put_and_exit:
+	newinet->inet_opt = NULL;
 	inet_csk_prepare_forced_close(newsk);
 	tcp_done(newsk);
 	goto exit;

@@ -1951,6 +1951,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	nskb->ip_summed = skb->ip_summed;

 	tcp_insert_write_queue_before(nskb, skb, sk);
+	tcp_highest_sack_replace(sk, skb, nskb);

 	len = 0;
 	tcp_for_write_queue_from_safe(skb, next, sk) {
@@ -2464,7 +2465,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)

 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);

-	tcp_highest_sack_combine(sk, next_skb, skb);
+	tcp_highest_sack_replace(sk, next_skb, skb);

 	tcp_unlink_write_queue(next_skb, sk);

@@ -315,6 +315,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
 	}
 	opt_space->dst1opt = fopt->dst1opt;
 	opt_space->opt_flen = fopt->opt_flen;
+	opt_space->tot_len = fopt->tot_len;
 	return opt_space;
 }
 EXPORT_SYMBOL_GPL(fl6_merge_options);

@@ -409,13 +409,16 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	case ICMPV6_DEST_UNREACH:
 		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
 				    t->parms.name);
-		break;
+		if (code != ICMPV6_PORT_UNREACH)
+			break;
+		return;
 	case ICMPV6_TIME_EXCEED:
 		if (code == ICMPV6_EXC_HOPLIMIT) {
 			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
 					    t->parms.name);
+			break;
 		}
-		break;
+		return;
 	case ICMPV6_PARAMPROB:
 		teli = 0;
 		if (code == ICMPV6_HDR_FIELD)
@@ -431,13 +434,13 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
 					    t->parms.name);
 		}
-		break;
+		return;
 	case ICMPV6_PKT_TOOBIG:
 		mtu = be32_to_cpu(info) - offset;
 		if (mtu < IPV6_MIN_MTU)
 			mtu = IPV6_MIN_MTU;
 		t->dev->mtu = mtu;
-		break;
+		return;
 	}

 	if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))

@@ -1201,11 +1201,11 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
 		if (WARN_ON(v6_cork->opt))
 			return -EINVAL;

-		v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
+		v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
 		if (unlikely(!v6_cork->opt))
 			return -ENOBUFS;

-		v6_cork->opt->tot_len = opt->tot_len;
+		v6_cork->opt->tot_len = sizeof(*opt);
 		v6_cork->opt->opt_flen = opt->opt_flen;
 		v6_cork->opt->opt_nflen = opt->opt_nflen;

@@ -1015,6 +1015,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
 		 session->name, cmd, arg);

 	sk = ps->sock;
+	if (!sk)
+		return -EBADR;
+
 	sock_hold(sk);

 	switch (cmd) {

@@ -4,6 +4,7 @@
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007-2008	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright 2017	Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -18,6 +19,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <net/mac80211.h>
+#include <crypto/algapi.h>
 #include <asm/unaligned.h>
 #include "ieee80211_i.h"
 #include "driver-ops.h"
@@ -606,6 +608,39 @@ void ieee80211_key_free_unused(struct ieee80211_key *key)
 	ieee80211_key_free_common(key);
 }

+static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata,
+				    struct ieee80211_key *old,
+				    struct ieee80211_key *new)
+{
+	u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP];
+	u8 *tk_old, *tk_new;
+
+	if (!old || new->conf.keylen != old->conf.keylen)
+		return false;
+
+	tk_old = old->conf.key;
+	tk_new = new->conf.key;
+
+	/*
+	 * In station mode, don't compare the TX MIC key, as it's never used
+	 * and offloaded rekeying may not care to send it to the host. This
+	 * is the case in iwlwifi, for example.
+	 */
+	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+	    new->conf.cipher == WLAN_CIPHER_SUITE_TKIP &&
+	    new->conf.keylen == WLAN_KEY_LEN_TKIP &&
+	    !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+		memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP);
+		memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP);
+		memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
+		memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
+		tk_old = tkip_old;
+		tk_new = tkip_new;
+	}
+
+	return !crypto_memneq(tk_old, tk_new, new->conf.keylen);
+}
+
 int ieee80211_key_link(struct ieee80211_key *key,
 		       struct ieee80211_sub_if_data *sdata,
 		       struct sta_info *sta)
@@ -617,9 +652,6 @@ int ieee80211_key_link(struct ieee80211_key *key,

 	pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
 	idx = key->conf.keyidx;
-	key->local = sdata->local;
-	key->sdata = sdata;
-	key->sta = sta;

 	mutex_lock(&sdata->local->key_mtx);

@@ -630,6 +662,20 @@ int ieee80211_key_link(struct ieee80211_key *key,
 	else
 		old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);

+	/*
+	 * Silently accept key re-installation without really installing the
+	 * new version of the key to avoid nonce reuse or replay issues.
+	 */
+	if (ieee80211_key_identical(sdata, old_key, key)) {
+		ieee80211_key_free_unused(key);
+		ret = 0;
+		goto out;
+	}
+
+	key->local = sdata->local;
+	key->sdata = sdata;
+	key->sta = sta;
+
 	increment_tailroom_need_count(sdata);

 	ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
@@ -645,6 +691,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
 		ret = 0;
 	}

+ out:
 	mutex_unlock(&sdata->local->key_mtx);

 	return ret;

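The reinstall check above compares key material with crypto_memneq() rather than memcmp(), so the comparison does not return early at the first differing byte and its timing leaks nothing about where keys diverge. The sketch below is only a userspace illustration of that constant-time idea, not the kernel's crypto_memneq() (which additionally takes care to defeat compiler optimizations that could reintroduce an early exit).

/* Illustrative only: a userspace analogue of a constant-time "memneq".
 * Differences are accumulated with XOR instead of returning at the
 * first mismatch, so the loop always runs over the full length. */
#include <stddef.h>
#include <stdio.h>

static int ct_memneq(const void *a, const void *b, size_t len)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= pa[i] ^ pb[i];	/* no data-dependent branch */

	return diff != 0;		/* nonzero means "not equal" */
}

int main(void)
{
	unsigned char k1[16] = { 0 }, k2[16] = { 0 };

	k2[15] = 1;
	printf("identical: %d, different: %d\n",
	       ct_memneq(k1, k1, sizeof(k1)), ct_memneq(k1, k2, sizeof(k1)));
	return 0;
}

With this in place, a reinstall of an identical key is silently accepted and the unused copy is freed, which is what prevents the nonce-reuse problems of actually re-installing it.
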
@@ -1724,7 +1724,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)

 out:
 	if (err && rollover) {
-		kfree(rollover);
+		kfree_rcu(rollover, rcu);
 		po->rollover = NULL;
 	}
 	mutex_unlock(&fanout_mutex);
@@ -1751,8 +1751,10 @@ static struct packet_fanout *fanout_release(struct sock *sk)
 		else
 			f = NULL;

-		if (po->rollover)
+		if (po->rollover) {
 			kfree_rcu(po->rollover, rcu);
+			po->rollover = NULL;
+		}
 	}
 	mutex_unlock(&fanout_mutex);

@@ -3769,6 +3771,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 	void *data = &val;
 	union tpacket_stats_u st;
 	struct tpacket_rollover_stats rstats;
+	struct packet_rollover *rollover;

 	if (level != SOL_PACKET)
 		return -ENOPROTOOPT;
@@ -3847,13 +3850,18 @@
 		       0);
 		break;
 	case PACKET_ROLLOVER_STATS:
-		if (!po->rollover)
+		rcu_read_lock();
+		rollover = rcu_dereference(po->rollover);
+		if (rollover) {
+			rstats.tp_all = atomic_long_read(&rollover->num);
+			rstats.tp_huge = atomic_long_read(&rollover->num_huge);
+			rstats.tp_failed = atomic_long_read(&rollover->num_failed);
+			data = &rstats;
+			lv = sizeof(rstats);
+		}
+		rcu_read_unlock();
+		if (!rollover)
 			return -EINVAL;
-		rstats.tp_all = atomic_long_read(&po->rollover->num);
-		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
-		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
-		data = &rstats;
-		lv = sizeof(rstats);
 		break;
 	case PACKET_TX_HAS_OFF:
 		val = po->tp_tx_has_off;

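The packet_getsockopt() change reads po->rollover under rcu_read_lock(), so a concurrent fanout release can no longer leave the handler dereferencing freed memory. For reference, a hypothetical userspace reader of those counters is sketched below; it is not part of the patch, needs CAP_NET_RAW, and the kernel returns EINVAL unless a fanout group with rollover has been configured on the socket.

/* Hypothetical userspace sketch: querying the rollover counters whose
 * kernel-side handler is made RCU-safe by the change above. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

int main(void)
{
	struct tpacket_rollover_stats rstats;
	socklen_t len = sizeof(rstats);
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0)
		return 1;

	memset(&rstats, 0, sizeof(rstats));
	if (getsockopt(fd, SOL_PACKET, PACKET_ROLLOVER_STATS,
		       &rstats, &len) == 0)
		printf("all=%llu huge=%llu failed=%llu\n",
		       (unsigned long long)rstats.tp_all,
		       (unsigned long long)rstats.tp_huge,
		       (unsigned long long)rstats.tp_failed);
	else
		perror("PACKET_ROLLOVER_STATS");	/* EINVAL without rollover */

	close(fd);
	return 0;
}
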
@@ -420,7 +420,7 @@ void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
 {
 	struct dst_entry *dst;

-	if (!t)
+	if (sock_owned_by_user(sk) || !t)
 		return;
 	dst = sctp_transport_dst_check(t);
 	if (dst)

@@ -168,6 +168,36 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
 	sk_mem_charge(sk, chunk->skb->truesize);
 }

+static void sctp_clear_owner_w(struct sctp_chunk *chunk)
+{
+	skb_orphan(chunk->skb);
+}
+
+static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
+				       void (*cb)(struct sctp_chunk *))
+
+{
+	struct sctp_outq *q = &asoc->outqueue;
+	struct sctp_transport *t;
+	struct sctp_chunk *chunk;
+
+	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
+		list_for_each_entry(chunk, &t->transmitted, transmitted_list)
+			cb(chunk);
+
+	list_for_each_entry(chunk, &q->retransmit, list)
+		cb(chunk);
+
+	list_for_each_entry(chunk, &q->sacked, list)
+		cb(chunk);
+
+	list_for_each_entry(chunk, &q->abandoned, list)
+		cb(chunk);
+
+	list_for_each_entry(chunk, &q->out_chunk_list, list)
+		cb(chunk);
+}
+
 /* Verify that this is a valid address. */
 static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
 				   int len)
@@ -7362,7 +7392,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	 * paths won't try to lock it and then oldsk.
 	 */
 	lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
+	sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
 	sctp_assoc_migrate(assoc, newsk);
+	sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);

 	/* If the association on the newsk is already closed before accept()
 	 * is called, set RCV_SHUTDOWN flag.

@@ -1084,25 +1084,6 @@ drop:
 	return rc;
 }

-/*
- * Send protocol message to the other endpoint.
- */
-void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
-			  u32 gap, u32 tolerance, u32 priority)
-{
-	struct sk_buff *skb = NULL;
-	struct sk_buff_head xmitq;
-
-	__skb_queue_head_init(&xmitq);
-	tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
-				  tolerance, priority, &xmitq);
-	skb = __skb_dequeue(&xmitq);
-	if (!skb)
-		return;
-	tipc_bearer_xmit_skb(l->net, l->bearer_id, skb, l->media_addr);
-	l->rcv_unacked = 0;
-}
-
 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
 				      u16 rcvgap, int tolerance, int priority,
 				      struct sk_buff_head *xmitq)
@@ -1636,9 +1617,12 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
 	char *name;
 	struct tipc_link *link;
 	struct tipc_node *node;
+	struct sk_buff_head xmitq;
 	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
 	struct net *net = sock_net(skb->sk);

+	__skb_queue_head_init(&xmitq);
+
 	if (!info->attrs[TIPC_NLA_LINK])
 		return -EINVAL;

@@ -1683,14 +1667,14 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)

 			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
 			link->tolerance = tol;
-			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
+			tipc_link_build_proto_msg(link, STATE_MSG, 0, 0, tol, 0, &xmitq);
 		}
 		if (props[TIPC_NLA_PROP_PRIO]) {
 			u32 prio;

 			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
 			link->priority = prio;
-			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
+			tipc_link_build_proto_msg(link, STATE_MSG, 0, 0, 0, prio, &xmitq);
 		}
 		if (props[TIPC_NLA_PROP_WIN]) {
 			u32 win;
@@ -1702,7 +1686,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)

 out:
 	tipc_node_unlock(node);
-
+	tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr);
 	return res;
 }

@@ -153,7 +153,6 @@ struct tipc_stats {
 struct tipc_link {
 	u32 addr;
 	char name[TIPC_MAX_LINK_NAME];
-	struct tipc_media_addr *media_addr;
 	struct net *net;

 	/* Management and link supervision data */

@@ -257,6 +257,8 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
 	err = -ENOENT;
 	if (sk == NULL)
 		goto out_nosk;
+	if (!net_eq(sock_net(sk), net))
+		goto out;

 	err = sock_diag_check_cookie(sk, req->udiag_cookie);
 	if (err)

@@ -20,6 +20,10 @@ config KEYS

 	  If you are unsure as to whether this is required, answer N.

+config KEYS_COMPAT
+	def_bool y
+	depends on COMPAT && KEYS
+
 config PERSISTENT_KEYRINGS
 	bool "Enable register of persistent per-UID keyrings"
 	depends on KEYS

@@ -148,8 +148,10 @@ void snd_seq_device_load_drivers(void)
 	flush_work(&autoload_work);
 }
 EXPORT_SYMBOL(snd_seq_device_load_drivers);
+#define cancel_autoload_drivers()	cancel_work_sync(&autoload_work)
 #else
 #define queue_autoload_drivers() /* NOP */
+#define cancel_autoload_drivers() /* NOP */
 #endif

 /*
@@ -159,6 +161,7 @@ static int snd_seq_device_dev_free(struct snd_device *device)
 {
 	struct snd_seq_device *dev = device->device_data;

+	cancel_autoload_drivers();
 	put_device(&dev->dev);
 	return 0;
 }