Merge remote-tracking branch 'common/android-4.4' into android-4.4.y

Dmitry Shmidt 2016-08-30 10:10:04 -07:00
commit 18cb0eedcc
12 changed files with 280 additions and 153 deletions

drivers/md/Kconfig

@@ -501,7 +501,7 @@ config DM_LOG_WRITES
If unsure, say N.
config DM_ANDROID_VERITY
bool "Android verity target support"
tristate "Android verity target support"
depends on DM_VERITY
depends on X509_CERTIFICATE_PARSER
depends on SYSTEM_TRUSTED_KEYRING
@@ -509,6 +509,7 @@ config DM_ANDROID_VERITY
depends on KEYS
depends on ASYMMETRIC_KEY_TYPE
depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
depends on MD_LINEAR
---help---
This device-mapper target is virtually a VERITY target. This
target is setup by reading the metadata contents piggybacked

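Making DM_ANDROID_VERITY tristate means the target can now be configured as =m and built as a loadable module, so it needs its own registration hooks instead of relying on being linked into dm-verity. A minimal sketch of that pattern, with illustrative names rather than the actual dm-android-verity code:

#include <linux/module.h>
#include <linux/device-mapper.h>

/* Illustrative target_type; the real callbacks live in the target's source file. */
static struct target_type android_verity_target = {
	.name    = "android-verity",
	.version = {1, 0, 0},
	.module  = THIS_MODULE,
	/* .ctr, .dtr, .map, .status, ... would point at the target's callbacks */
};

static int __init dm_android_verity_init(void)
{
	/* dm_register_target() makes the target visible to the DM core */
	return dm_register_target(&android_verity_target);
}

static void __exit dm_android_verity_exit(void)
{
	dm_unregister_target(&android_verity_target);
}

module_init(dm_android_verity_init);
module_exit(dm_android_verity_exit);
MODULE_LICENSE("GPL");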
drivers/md/Makefile

@@ -60,6 +60,7 @@ obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o
obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
obj-$(CONFIG_DM_ERA) += dm-era.o
obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o
obj-$(CONFIG_DM_ANDROID_VERITY) += dm-android-verity.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
@@ -68,7 +69,3 @@ endif
ifeq ($(CONFIG_DM_VERITY_FEC),y)
dm-verity-objs += dm-verity-fec.o
endif
ifeq ($(CONFIG_DM_ANDROID_VERITY),y)
dm-verity-objs += dm-android-verity.o
endif

drivers/md/dm-linear.c

@@ -66,6 +66,7 @@ int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
kfree(lc);
return ret;
}
EXPORT_SYMBOL_GPL(dm_linear_ctr);
void dm_linear_dtr(struct dm_target *ti)
{
@@ -74,6 +75,7 @@ void dm_linear_dtr(struct dm_target *ti)
dm_put_device(ti, lc->dev);
kfree(lc);
}
EXPORT_SYMBOL_GPL(dm_linear_dtr);
static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
{
@@ -98,6 +100,7 @@ int dm_linear_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
EXPORT_SYMBOL_GPL(dm_linear_map);
void dm_linear_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
@@ -115,6 +118,7 @@ void dm_linear_status(struct dm_target *ti, status_type_t type,
break;
}
}
EXPORT_SYMBOL_GPL(dm_linear_status);
int dm_linear_prepare_ioctl(struct dm_target *ti,
struct block_device **bdev, fmode_t *mode)
@@ -132,6 +136,7 @@ int dm_linear_prepare_ioctl(struct dm_target *ti,
return 1;
return 0;
}
EXPORT_SYMBOL_GPL(dm_linear_prepare_ioctl);
int dm_linear_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
@@ -140,6 +145,7 @@ int dm_linear_iterate_devices(struct dm_target *ti,
return fn(ti, lc->dev, lc->start, ti->len, data);
}
EXPORT_SYMBOL_GPL(dm_linear_iterate_devices);
static struct target_type linear_target = {
.name = "linear",

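The dm_linear_* entry points are exported so another target (here dm-android-verity) can reuse the linear implementation directly, e.g. to degrade to a plain 1:1 mapping of the backing device when verification is disabled. A hedged sketch of that reuse; the helper name and arguments are illustrative:

/* Illustrative only: construct the target as a linear mapping starting
 * at sector 0 of "dev", then let the exported helpers do the work.
 */
static int add_linear_fallback(struct dm_target *ti, char *dev)
{
	char *argv[] = { dev, "0" };	/* dm-linear ctr takes "<dev> <offset>" */

	return dm_linear_ctr(ti, ARRAY_SIZE(argv), argv);
}

/* The wrapper's target_type can then point straight at the exports:
 *	.map    = dm_linear_map,
 *	.dtr    = dm_linear_dtr,
 *	.status = dm_linear_status,
 * so I/O follows the ordinary linear path with no extra indirection.
 */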
drivers/md/dm-verity-target.c

@@ -592,6 +592,7 @@ int verity_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
EXPORT_SYMBOL_GPL(verity_map);
/*
* Status: V (valid) or C (corruption found)
@@ -655,6 +656,7 @@ void verity_status(struct dm_target *ti, status_type_t type,
break;
}
}
EXPORT_SYMBOL_GPL(verity_status);
int verity_prepare_ioctl(struct dm_target *ti,
struct block_device **bdev, fmode_t *mode)
@@ -668,6 +670,7 @@ int verity_prepare_ioctl(struct dm_target *ti,
return 1;
return 0;
}
EXPORT_SYMBOL_GPL(verity_prepare_ioctl);
int verity_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
@@ -676,6 +679,7 @@ int verity_iterate_devices(struct dm_target *ti,
return fn(ti, v->data_dev, v->data_start, ti->len, data);
}
EXPORT_SYMBOL_GPL(verity_iterate_devices);
void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
@@ -689,6 +693,7 @@ void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_min(limits, limits->logical_block_size);
}
EXPORT_SYMBOL_GPL(verity_io_hints);
void verity_dtr(struct dm_target *ti)
{
@@ -719,6 +724,7 @@ void verity_dtr(struct dm_target *ti)
kfree(v);
}
EXPORT_SYMBOL_GPL(verity_dtr);
static int verity_alloc_zero_digest(struct dm_verity *v)
{
@@ -1053,6 +1059,7 @@ bad:
return r;
}
EXPORT_SYMBOL_GPL(verity_ctr);
static struct target_type verity_target = {
.name = "verity",

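Likewise, exporting verity_ctr(), verity_map() and the other verity callbacks lets a wrapper target assemble a standard verity table in memory and hand it to the existing implementation instead of re-parsing metadata itself. A rough sketch, with placeholder values standing in for data read from the device:

/* Illustrative only: the strings below are placeholders, not real metadata. */
static int wrapper_verity_ctr(struct dm_target *ti)
{
	char *argv[] = {
		"1",			/* version */
		"/dev/block/sda",	/* data device (placeholder) */
		"/dev/block/sda",	/* hash device (placeholder) */
		"4096", "4096",		/* data / hash block size */
		"1024", "1025",		/* data blocks, hash start block */
		"sha256",
		"<root digest>",	/* hex digest from the metadata */
		"<salt>",		/* hex salt from the metadata */
	};

	return verity_ctr(ti, ARRAY_SIZE(argv), argv);
}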
include/linux/percpu-rwsem.h

@@ -10,30 +10,96 @@
struct percpu_rw_semaphore {
struct rcu_sync rss;
unsigned int __percpu *fast_read_ctr;
unsigned int __percpu *read_count;
struct rw_semaphore rw_sem;
atomic_t slow_read_ctr;
wait_queue_head_t write_waitq;
wait_queue_head_t writer;
int readers_block;
};
extern void percpu_down_read(struct percpu_rw_semaphore *);
extern int percpu_down_read_trylock(struct percpu_rw_semaphore *);
extern void percpu_up_read(struct percpu_rw_semaphore *);
extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
extern void __percpu_up_read(struct percpu_rw_semaphore *);
static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
might_sleep();
rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);
preempt_disable();
/*
* We are in an RCU-sched read-side critical section, so the writer
* cannot both change sem->state from readers_fast and start checking
* counters while we are here. So if we see !sem->state, we know that
* the writer won't be checking until we're past the preempt_enable()
* and that once the synchronize_sched() is done, the writer will see
* anything we did within this RCU-sched read-side critical section.
*/
__this_cpu_inc(*sem->read_count);
if (unlikely(!rcu_sync_is_idle(&sem->rss)))
__percpu_down_read(sem, false); /* Unconditional memory barrier */
preempt_enable();
/*
* The barrier() from preempt_enable() prevents the compiler from
* bleeding the critical section out.
*/
}
static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
int ret = 1;
preempt_disable();
/*
* Same as in percpu_down_read().
*/
__this_cpu_inc(*sem->read_count);
if (unlikely(!rcu_sync_is_idle(&sem->rss)))
ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
preempt_enable();
/*
* The barrier() from preempt_enable() prevents the compiler from
* bleeding the critical section out.
*/
if (ret)
rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);
return ret;
}
static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
/*
* The barrier() in preempt_disable() prevents the compiler from
* bleeding the critical section out.
*/
preempt_disable();
/*
* Same as in percpu_down_read().
*/
if (likely(rcu_sync_is_idle(&sem->rss)))
__this_cpu_dec(*sem->read_count);
else
__percpu_up_read(sem); /* Unconditional memory barrier */
preempt_enable();
rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
}
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);
extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
const char *, struct lock_class_key *);
extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
#define percpu_init_rwsem(brw) \
#define percpu_init_rwsem(sem) \
({ \
static struct lock_class_key rwsem_key; \
__percpu_init_rwsem(brw, #brw, &rwsem_key); \
__percpu_init_rwsem(sem, #sem, &rwsem_key); \
})
#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,

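With this rework, percpu_down_read()/percpu_up_read() normally only bump a per-CPU counter and never touch the shared rw_sem; a writer uses rcu_sync_enter() to force readers onto the slow path for the duration of its critical section. A hedged usage sketch (the semaphore and surrounding functions are illustrative):

static struct percpu_rw_semaphore my_rwsem;

static int __init my_init(void)
{
	/* allocates the per-CPU read counter and sets up the rcu_sync state */
	return percpu_init_rwsem(&my_rwsem);
}

static void hot_path_reader(void)
{
	percpu_down_read(&my_rwsem);	/* usually just a per-CPU increment */
	/* ... read-side critical section ... */
	percpu_up_read(&my_rwsem);	/* usually just a per-CPU decrement */
}

static void rare_writer(void)
{
	percpu_down_write(&my_rwsem);	/* blocks new readers, waits for active ones */
	/* ... exclusive section ... */
	percpu_up_write(&my_rwsem);	/* readers drift back to the fast path after a grace period */
}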
include/linux/rcu_sync.h

@@ -59,6 +59,7 @@ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
}
extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
extern void rcu_sync_enter_start(struct rcu_sync *);
extern void rcu_sync_enter(struct rcu_sync *);
extern void rcu_sync_exit(struct rcu_sync *);
extern void rcu_sync_dtor(struct rcu_sync *);

kernel/cgroup.c

@@ -5380,6 +5380,12 @@ int __init cgroup_init(void)
BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
/*
* The latency of the synchronize_sched() is too high for cgroups,
* avoid it at the cost of forcing all readers into the slow path.
*/
rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss);
mutex_lock(&cgroup_mutex);
/* Add init_css_set to the hash table */

kernel/fork.c

@@ -1370,7 +1370,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->real_start_time = ktime_get_boot_ns();
p->io_context = NULL;
p->audit_context = NULL;
threadgroup_change_begin(current);
cgroup_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_dup(p->mempolicy);
@@ -1522,6 +1521,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
INIT_LIST_HEAD(&p->thread_group);
p->task_works = NULL;
threadgroup_change_begin(current);
/*
* Ensure that the cgroup subsystem policies allow the new process to be
* forked. It should be noted that the new process's css_set can be changed
@@ -1622,6 +1622,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
bad_fork_cancel_cgroup:
cgroup_cancel_fork(p, cgrp_ss_priv);
bad_fork_free_pid:
threadgroup_change_end(current);
if (pid != &init_struct_pid)
free_pid(pid);
bad_fork_cleanup_io:
@@ -1652,7 +1653,6 @@ bad_fork_cleanup_policy:
mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
#endif
threadgroup_change_end(current);
delayacct_tsk_free(p);
bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);

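The fork.c hunks move threadgroup_change_begin() from early in copy_process() to just before the cgroup_can_fork() check, and add a matching threadgroup_change_end() to the new error path, so the now rcu_sync-backed cgroup_threadgroup_rwsem is read-held for a much shorter window. A simplified sketch of the resulting ordering; the real function has many more steps and error labels:

/* Simplified sketch only, not the actual copy_process() code. */
static int copy_process_sketch(struct task_struct *p, void *ss_priv[])
{
	int err;

	cgroup_fork(p);				/* the rwsem is no longer held here */

	/* ... the bulk of copy_process() ... */

	threadgroup_change_begin(current);	/* read side of the threadgroup rwsem */
	err = cgroup_can_fork(p, ss_priv);
	if (err) {
		threadgroup_change_end(current);/* keeps begin/end balanced on failure */
		return err;
	}

	/* ... attach the pid, cgroup_post_fork(), ... */
	threadgroup_change_end(current);
	return 0;
}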
kernel/locking/percpu-rwsem.c

@@ -8,151 +8,186 @@
#include <linux/sched.h>
#include <linux/errno.h>
int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
const char *name, struct lock_class_key *rwsem_key)
{
brw->fast_read_ctr = alloc_percpu(int);
if (unlikely(!brw->fast_read_ctr))
sem->read_count = alloc_percpu(int);
if (unlikely(!sem->read_count))
return -ENOMEM;
/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
__init_rwsem(&brw->rw_sem, name, rwsem_key);
rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
atomic_set(&brw->slow_read_ctr, 0);
init_waitqueue_head(&brw->write_waitq);
rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
__init_rwsem(&sem->rw_sem, name, rwsem_key);
init_waitqueue_head(&sem->writer);
sem->readers_block = 0;
return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);
void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
{
/*
* XXX: temporary kludge. The error path in alloc_super()
* assumes that percpu_free_rwsem() is safe after kzalloc().
*/
if (!brw->fast_read_ctr)
if (!sem->read_count)
return;
rcu_sync_dtor(&brw->rss);
free_percpu(brw->fast_read_ctr);
brw->fast_read_ctr = NULL; /* catch use after free bugs */
rcu_sync_dtor(&sem->rss);
free_percpu(sem->read_count);
sem->read_count = NULL; /* catch use after free bugs */
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);
/*
* This is the fast-path for down_read/up_read. If it succeeds we rely
* on the barriers provided by rcu_sync_enter/exit; see the comments in
* percpu_down_write() and percpu_up_write().
*
* If this helper fails the callers rely on the normal rw_semaphore and
* atomic_dec_and_test(), so in this case we have the necessary barriers.
*/
static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
{
bool success;
preempt_disable();
success = rcu_sync_is_idle(&brw->rss);
if (likely(success))
__this_cpu_add(*brw->fast_read_ctr, val);
preempt_enable();
return success;
}
/*
* Like the normal down_read() this is not recursive, the writer can
* come after the first percpu_down_read() and create the deadlock.
*
* Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
* percpu_up_read() does rwsem_release(). This pairs with the usage
* of ->rw_sem in percpu_down/up_write().
*/
void percpu_down_read(struct percpu_rw_semaphore *brw)
{
might_sleep();
rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
if (likely(update_fast_ctr(brw, +1)))
return;
/* Avoid rwsem_acquire_read() and rwsem_release() */
__down_read(&brw->rw_sem);
atomic_inc(&brw->slow_read_ctr);
__up_read(&brw->rw_sem);
}
EXPORT_SYMBOL_GPL(percpu_down_read);
int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
{
if (unlikely(!update_fast_ctr(brw, +1))) {
if (!__down_read_trylock(&brw->rw_sem))
return 0;
atomic_inc(&brw->slow_read_ctr);
__up_read(&brw->rw_sem);
}
rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 1, _RET_IP_);
return 1;
}
void percpu_up_read(struct percpu_rw_semaphore *brw)
{
rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);
if (likely(update_fast_ctr(brw, -1)))
return;
/* false-positive is possible but harmless */
if (atomic_dec_and_test(&brw->slow_read_ctr))
wake_up_all(&brw->write_waitq);
}
EXPORT_SYMBOL_GPL(percpu_up_read);
static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
{
unsigned int sum = 0;
int cpu;
for_each_possible_cpu(cpu) {
sum += per_cpu(*brw->fast_read_ctr, cpu);
per_cpu(*brw->fast_read_ctr, cpu) = 0;
}
return sum;
}
void percpu_down_write(struct percpu_rw_semaphore *brw)
int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
{
/*
* Make rcu_sync_is_idle() == F and thus disable the fast-path in
* percpu_down_read() and percpu_up_read(), and wait for gp pass.
* Due to having preemption disabled the decrement happens on
* the same CPU as the increment, avoiding the
* increment-on-one-CPU-and-decrement-on-another problem.
*
* The latter synchronises us with the preceding readers which used
* the fast-past, so we can not miss the result of __this_cpu_add()
* or anything else inside their criticial sections.
* If the reader misses the writer's assignment of readers_block, then
* the writer is guaranteed to see the reader's increment.
*
* Conversely, any readers that increment their sem->read_count after
* the writer looks are guaranteed to see the readers_block value,
* which in turn means that they are guaranteed to immediately
* decrement their sem->read_count, so that it doesn't matter that the
* writer missed them.
*/
rcu_sync_enter(&brw->rss);
/* exclude other writers, and block the new readers completely */
down_write(&brw->rw_sem);
smp_mb(); /* A matches D */
/* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);
/*
* If !readers_block the critical section starts here, matched by the
* release in percpu_up_write().
*/
if (likely(!smp_load_acquire(&sem->readers_block)))
return 1;
/* wait for all readers to complete their percpu_up_read() */
wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
/*
* Per the above comment; we still have preemption disabled and
* will thus decrement on the same CPU as we incremented.
*/
__percpu_up_read(sem);
if (try)
return 0;
/*
* We either call schedule() in the wait, or we'll fall through
* and reschedule on the preempt_enable() in percpu_down_read().
*/
preempt_enable_no_resched();
/*
* Avoid lockdep for the down/up_read() we already have them.
*/
__down_read(&sem->rw_sem);
this_cpu_inc(*sem->read_count);
__up_read(&sem->rw_sem);
preempt_disable();
return 1;
}
EXPORT_SYMBOL_GPL(__percpu_down_read);
void __percpu_up_read(struct percpu_rw_semaphore *sem)
{
smp_mb(); /* B matches C */
/*
* In other words, if they see our decrement (presumably to aggregate
* zero, as that is the only time it matters) they will also see our
* critical section.
*/
__this_cpu_dec(*sem->read_count);
/* Prod writer to recheck readers_active */
wake_up(&sem->writer);
}
EXPORT_SYMBOL_GPL(__percpu_up_read);
#define per_cpu_sum(var) \
({ \
typeof(var) __sum = 0; \
int cpu; \
compiletime_assert_atomic_type(__sum); \
for_each_possible_cpu(cpu) \
__sum += per_cpu(var, cpu); \
__sum; \
})
/*
* Return true if the modular sum of the sem->read_count per-CPU variable is
* zero. If this sum is zero, then it is stable due to the fact that if any
* newly arriving readers increment a given counter, they will immediately
* decrement that same counter.
*/
static bool readers_active_check(struct percpu_rw_semaphore *sem)
{
if (per_cpu_sum(*sem->read_count) != 0)
return false;
/*
* If we observed the decrement; ensure we see the entire critical
* section.
*/
smp_mb(); /* C matches B */
return true;
}
void percpu_down_write(struct percpu_rw_semaphore *sem)
{
/* Notify readers to take the slow path. */
rcu_sync_enter(&sem->rss);
down_write(&sem->rw_sem);
/*
* Notify new readers to block; up until now, and thus throughout the
* longish rcu_sync_enter() above, new readers could still come in.
*/
WRITE_ONCE(sem->readers_block, 1);
smp_mb(); /* D matches A */
/*
* If they don't see our writer of readers_block, then we are
* guaranteed to see their sem->read_count increment, and therefore
* will wait for them.
*/
/* Wait for all now active readers to complete. */
wait_event(sem->writer, readers_active_check(sem));
}
EXPORT_SYMBOL_GPL(percpu_down_write);
void percpu_up_write(struct percpu_rw_semaphore *brw)
void percpu_up_write(struct percpu_rw_semaphore *sem)
{
/* release the lock, but the readers can't use the fast-path */
up_write(&brw->rw_sem);
/*
* Enable the fast-path in percpu_down_read() and percpu_up_read()
* but only after another gp pass; this adds the necessary barrier
* to ensure the reader can't miss the changes done by us.
* Signal the writer is done, no fast path yet.
*
* One reason that we cannot just immediately flip to readers_fast is
* that new readers might fail to see the results of this writer's
* critical section.
*
* Therefore we force it through the slow path which guarantees an
* acquire and thereby guarantees the critical section's consistency.
*/
rcu_sync_exit(&brw->rss);
smp_store_release(&sem->readers_block, 0);
/*
* Release the write lock, this will allow readers back in the game.
*/
up_write(&sem->rw_sem);
/*
* Once this completes (at least one RCU-sched grace period hence) the
* reader fast path will be available again. Safe to use outside the
* exclusive write lock because it's counting.
*/
rcu_sync_exit(&sem->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);

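The core of the new scheme is a publish-then-check handshake: a reader increments its per-CPU counter and then looks at readers_block, while the writer sets readers_block and then sums the counters, with full barriers (A/D above) in between, so the two sides can never simultaneously miss each other. A distilled, stand-alone illustration of that pattern (illustrative code, not taken from the kernel):

#include <linux/percpu.h>
#include <linux/compiler.h>

static DEFINE_PER_CPU(unsigned int, demo_read_count);
static int demo_block;

static bool demo_reader_fast_path(void)
{
	this_cpu_inc(demo_read_count);		/* A: publish "I am reading" */
	smp_mb();				/* pairs with D below */
	if (!READ_ONCE(demo_block))
		return true;			/* writer is guaranteed to see our count */
	this_cpu_dec(demo_read_count);		/* writer already looked: back out */
	return false;				/* caller falls back to the rw_sem */
}

static void demo_writer_block_readers(void)
{
	WRITE_ONCE(demo_block, 1);		/* D: publish "new readers must block" */
	smp_mb();				/* pairs with A above */
	/* any fast-path reader we missed has already backed out, so waiting
	 * for the per-CPU sum to reach zero is sufficient from here on */
}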
kernel/rcu/sync.c

@@ -82,6 +82,18 @@ void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
rsp->gp_type = type;
}
/**
* Must be called after rcu_sync_init() and before first use.
*
* Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
* pairs turn into NO-OPs.
*/
void rcu_sync_enter_start(struct rcu_sync *rsp)
{
rsp->gp_count++;
rsp->gp_state = GP_PASSED;
}
/**
* rcu_sync_enter() - Force readers onto slowpath
* @rsp: Pointer to rcu_sync structure to use for synchronization

net/ipv6/icmp.c

@@ -98,7 +98,7 @@ static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!(type & ICMPV6_INFOMSG_MASK))
if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
ping_err(skb, offset, info);
ping_err(skb, offset, ntohl(info));
}
static int icmpv6_rcv(struct sk_buff *skb);

net/ipv6/ping.c

@@ -84,7 +84,7 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct icmp6hdr user_icmph;
int addr_type;
struct in6_addr *daddr;
int iif = 0;
int oif = 0;
struct flowi6 fl6;
int err;
int hlimit;
@@ -106,25 +106,30 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (u->sin6_family != AF_INET6) {
return -EAFNOSUPPORT;
}
if (sk->sk_bound_dev_if &&
sk->sk_bound_dev_if != u->sin6_scope_id) {
return -EINVAL;
}
daddr = &(u->sin6_addr);
iif = u->sin6_scope_id;
if (__ipv6_addr_needs_scope_id(ipv6_addr_type(daddr)))
oif = u->sin6_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = &sk->sk_v6_daddr;
}
if (!iif)
iif = sk->sk_bound_dev_if;
if (!oif)
oif = sk->sk_bound_dev_if;
if (!oif)
oif = np->sticky_pktinfo.ipi6_ifindex;
if (!oif && ipv6_addr_is_multicast(daddr))
oif = np->mcast_oif;
else if (!oif)
oif = np->ucast_oif;
addr_type = ipv6_addr_type(daddr);
if (__ipv6_addr_needs_scope_id(addr_type) && !iif)
return -EINVAL;
if (addr_type & IPV6_ADDR_MAPPED)
if ((__ipv6_addr_needs_scope_id(addr_type) && !oif) ||
(addr_type & IPV6_ADDR_MAPPED) ||
(oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if))
return -EINVAL;
/* TODO: use ip6_datagram_send_ctl to get options from cmsg */
@@ -134,17 +139,13 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.flowi6_proto = IPPROTO_ICMPV6;
fl6.saddr = np->saddr;
fl6.daddr = *daddr;
fl6.flowi6_oif = oif;
fl6.flowi6_mark = sk->sk_mark;
fl6.flowi6_uid = sock_i_uid(sk);
fl6.fl6_icmp_type = user_icmph.icmp6_type;
fl6.fl6_icmp_code = user_icmph.icmp6_code;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr);
if (IS_ERR(dst))
return PTR_ERR(dst);
@@ -154,11 +155,6 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (!np)
return -EBADF;
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
pfh.icmph.type = user_icmph.icmp6_type;
pfh.icmph.code = user_icmph.icmp6_code;
pfh.icmph.checksum = 0;
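The ping_v6_sendmsg() changes replace the misnamed iif with oif and resolve the outgoing interface once, up front, instead of patching fl6.flowi6_oif in two later places; contradictory combinations (a scope id that conflicts with the bound device, mapped addresses, a needed-but-missing scope id) are rejected with -EINVAL. Roughly, the selection order now is the one sketched below; the helper is illustrative, not a function in the file, and scope_id is 0 when no sockaddr was supplied:

static int ping_v6_pick_oif(struct sock *sk, struct ipv6_pinfo *np,
			    const struct in6_addr *daddr, int scope_id)
{
	int oif = 0;

	/* 1. sin6_scope_id, if the destination actually needs one */
	if (__ipv6_addr_needs_scope_id(ipv6_addr_type(daddr)))
		oif = scope_id;
	/* 2. the device the socket is bound to */
	if (!oif)
		oif = sk->sk_bound_dev_if;
	/* 3. the sticky IPV6_PKTINFO interface */
	if (!oif)
		oif = np->sticky_pktinfo.ipi6_ifindex;
	/* 4. the multicast or unicast default oif */
	if (!oif && ipv6_addr_is_multicast(daddr))
		oif = np->mcast_oif;
	else if (!oif)
		oif = np->ucast_oif;

	return oif;
}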