Merge branch 'tipc-next'
Ying Xue says: ==================== standardize TIPC SKB queue operations Now the following SKB queues are created and maintained within the internal TIPC stack: - link transmission queue - link deferred queue - link receive queue - socket outgoing packet chain - name table outgoing packet chain In order to manage the above queues, the TIPC stack declares an sk_buff pointer for each queue to record its head, and directly modifies the "prev" and "next" SKB pointers of the SKB structure when inserting or deleting an SKB in or from the queue. As these operations are pretty complex, they easily lead to fatal mistakes. If these sk_buff pointers are replaced with sk_buff_head instances as queue heads and the corresponding generic SKB list APIs are used to manage them, the entire TIPC code would become much cleaner and more readable. But before making the change, we need to clean up the following redundant functionality: - remove node subscribe infrastructure - remove protocol message queue - remove retransmission queue - clean up the process of pushing packets in the link layer ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
8b7f8a9990
18 changed files with 497 additions and 741 deletions
|
@ -7,8 +7,8 @@ obj-$(CONFIG_TIPC) := tipc.o
|
||||||
tipc-y += addr.o bcast.o bearer.o config.o \
|
tipc-y += addr.o bcast.o bearer.o config.o \
|
||||||
core.o link.o discover.o msg.o \
|
core.o link.o discover.o msg.o \
|
||||||
name_distr.o subscr.o name_table.o net.o \
|
name_distr.o subscr.o name_table.o net.o \
|
||||||
netlink.o node.o node_subscr.o \
|
netlink.o node.o socket.o log.o eth_media.o \
|
||||||
socket.o log.o eth_media.o server.o
|
server.o
|
||||||
|
|
||||||
tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
|
tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
|
||||||
tipc-$(CONFIG_SYSCTL) += sysctl.o
|
tipc-$(CONFIG_SYSCTL) += sysctl.o
|
||||||
|
|
109
net/tipc/bcast.c
109
net/tipc/bcast.c
|
@ -217,12 +217,13 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
|
||||||
*/
|
*/
|
||||||
static void bclink_retransmit_pkt(u32 after, u32 to)
|
static void bclink_retransmit_pkt(u32 after, u32 to)
|
||||||
{
|
{
|
||||||
struct sk_buff *buf;
|
struct sk_buff *skb;
|
||||||
|
|
||||||
buf = bcl->first_out;
|
skb_queue_walk(&bcl->outqueue, skb) {
|
||||||
while (buf && less_eq(buf_seqno(buf), after))
|
if (more(buf_seqno(skb), after))
|
||||||
buf = buf->next;
|
break;
|
||||||
tipc_link_retransmit(bcl, buf, mod(to - after));
|
}
|
||||||
|
tipc_link_retransmit(bcl, skb, mod(to - after));
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -245,14 +246,14 @@ void tipc_bclink_wakeup_users(void)
|
||||||
*/
|
*/
|
||||||
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
|
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
|
||||||
{
|
{
|
||||||
struct sk_buff *crs;
|
struct sk_buff *skb, *tmp;
|
||||||
struct sk_buff *next;
|
struct sk_buff *next;
|
||||||
unsigned int released = 0;
|
unsigned int released = 0;
|
||||||
|
|
||||||
tipc_bclink_lock();
|
tipc_bclink_lock();
|
||||||
/* Bail out if tx queue is empty (no clean up is required) */
|
/* Bail out if tx queue is empty (no clean up is required) */
|
||||||
crs = bcl->first_out;
|
skb = skb_peek(&bcl->outqueue);
|
||||||
if (!crs)
|
if (!skb)
|
||||||
goto exit;
|
goto exit;
|
||||||
|
|
||||||
/* Determine which messages need to be acknowledged */
|
/* Determine which messages need to be acknowledged */
|
||||||
|
@ -271,43 +272,43 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
|
||||||
* Bail out if specified sequence number does not correspond
|
* Bail out if specified sequence number does not correspond
|
||||||
* to a message that has been sent and not yet acknowledged
|
* to a message that has been sent and not yet acknowledged
|
||||||
*/
|
*/
|
||||||
if (less(acked, buf_seqno(crs)) ||
|
if (less(acked, buf_seqno(skb)) ||
|
||||||
less(bcl->fsm_msg_cnt, acked) ||
|
less(bcl->fsm_msg_cnt, acked) ||
|
||||||
less_eq(acked, n_ptr->bclink.acked))
|
less_eq(acked, n_ptr->bclink.acked))
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Skip over packets that node has previously acknowledged */
|
/* Skip over packets that node has previously acknowledged */
|
||||||
while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
|
skb_queue_walk(&bcl->outqueue, skb) {
|
||||||
crs = crs->next;
|
if (more(buf_seqno(skb), n_ptr->bclink.acked))
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
/* Update packets that node is now acknowledging */
|
/* Update packets that node is now acknowledging */
|
||||||
|
skb_queue_walk_from_safe(&bcl->outqueue, skb, tmp) {
|
||||||
|
if (more(buf_seqno(skb), acked))
|
||||||
|
break;
|
||||||
|
|
||||||
while (crs && less_eq(buf_seqno(crs), acked)) {
|
next = tipc_skb_queue_next(&bcl->outqueue, skb);
|
||||||
next = crs->next;
|
if (skb != bcl->next_out) {
|
||||||
|
bcbuf_decr_acks(skb);
|
||||||
if (crs != bcl->next_out)
|
} else {
|
||||||
bcbuf_decr_acks(crs);
|
bcbuf_set_acks(skb, 0);
|
||||||
else {
|
|
||||||
bcbuf_set_acks(crs, 0);
|
|
||||||
bcl->next_out = next;
|
bcl->next_out = next;
|
||||||
bclink_set_last_sent();
|
bclink_set_last_sent();
|
||||||
}
|
}
|
||||||
|
|
||||||
if (bcbuf_acks(crs) == 0) {
|
if (bcbuf_acks(skb) == 0) {
|
||||||
bcl->first_out = next;
|
__skb_unlink(skb, &bcl->outqueue);
|
||||||
bcl->out_queue_size--;
|
kfree_skb(skb);
|
||||||
kfree_skb(crs);
|
|
||||||
released = 1;
|
released = 1;
|
||||||
}
|
}
|
||||||
crs = next;
|
|
||||||
}
|
}
|
||||||
n_ptr->bclink.acked = acked;
|
n_ptr->bclink.acked = acked;
|
||||||
|
|
||||||
/* Try resolving broadcast link congestion, if necessary */
|
/* Try resolving broadcast link congestion, if necessary */
|
||||||
|
|
||||||
if (unlikely(bcl->next_out)) {
|
if (unlikely(bcl->next_out)) {
|
||||||
tipc_link_push_queue(bcl);
|
tipc_link_push_packets(bcl);
|
||||||
bclink_set_last_sent();
|
bclink_set_last_sent();
|
||||||
}
|
}
|
||||||
if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks)))
|
if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks)))
|
||||||
|
@ -327,19 +328,16 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
|
||||||
struct sk_buff *buf;
|
struct sk_buff *buf;
|
||||||
|
|
||||||
/* Ignore "stale" link state info */
|
/* Ignore "stale" link state info */
|
||||||
|
|
||||||
if (less_eq(last_sent, n_ptr->bclink.last_in))
|
if (less_eq(last_sent, n_ptr->bclink.last_in))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/* Update link synchronization state; quit if in sync */
|
/* Update link synchronization state; quit if in sync */
|
||||||
|
|
||||||
bclink_update_last_sent(n_ptr, last_sent);
|
bclink_update_last_sent(n_ptr, last_sent);
|
||||||
|
|
||||||
if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
|
if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/* Update out-of-sync state; quit if loss is still unconfirmed */
|
/* Update out-of-sync state; quit if loss is still unconfirmed */
|
||||||
|
|
||||||
if ((++n_ptr->bclink.oos_state) == 1) {
|
if ((++n_ptr->bclink.oos_state) == 1) {
|
||||||
if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
|
if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
|
||||||
return;
|
return;
|
||||||
|
@ -347,15 +345,15 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Don't NACK if one has been recently sent (or seen) */
|
/* Don't NACK if one has been recently sent (or seen) */
|
||||||
|
|
||||||
if (n_ptr->bclink.oos_state & 0x1)
|
if (n_ptr->bclink.oos_state & 0x1)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/* Send NACK */
|
/* Send NACK */
|
||||||
|
|
||||||
buf = tipc_buf_acquire(INT_H_SIZE);
|
buf = tipc_buf_acquire(INT_H_SIZE);
|
||||||
if (buf) {
|
if (buf) {
|
||||||
struct tipc_msg *msg = buf_msg(buf);
|
struct tipc_msg *msg = buf_msg(buf);
|
||||||
|
struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue);
|
||||||
|
u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
|
||||||
|
|
||||||
tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
|
tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
|
||||||
INT_H_SIZE, n_ptr->addr);
|
INT_H_SIZE, n_ptr->addr);
|
||||||
|
@ -363,9 +361,7 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
|
||||||
msg_set_mc_netid(msg, tipc_net_id);
|
msg_set_mc_netid(msg, tipc_net_id);
|
||||||
msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
|
msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
|
||||||
msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
|
msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
|
||||||
msg_set_bcgap_to(msg, n_ptr->bclink.deferred_head
|
msg_set_bcgap_to(msg, to);
|
||||||
? buf_seqno(n_ptr->bclink.deferred_head) - 1
|
|
||||||
: n_ptr->bclink.last_sent);
|
|
||||||
|
|
||||||
tipc_bclink_lock();
|
tipc_bclink_lock();
|
||||||
tipc_bearer_send(MAX_BEARERS, buf, NULL);
|
tipc_bearer_send(MAX_BEARERS, buf, NULL);
|
||||||
|
@ -402,20 +398,20 @@ static void bclink_peek_nack(struct tipc_msg *msg)
|
||||||
|
|
||||||
/* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster
|
/* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster
|
||||||
* and to identified node local sockets
|
* and to identified node local sockets
|
||||||
* @buf: chain of buffers containing message
|
* @list: chain of buffers containing message
|
||||||
* Consumes the buffer chain, except when returning -ELINKCONG
|
* Consumes the buffer chain, except when returning -ELINKCONG
|
||||||
* Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
|
* Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
|
||||||
*/
|
*/
|
||||||
int tipc_bclink_xmit(struct sk_buff *buf)
|
int tipc_bclink_xmit(struct sk_buff_head *list)
|
||||||
{
|
{
|
||||||
int rc = 0;
|
int rc = 0;
|
||||||
int bc = 0;
|
int bc = 0;
|
||||||
struct sk_buff *clbuf;
|
struct sk_buff *skb;
|
||||||
|
|
||||||
/* Prepare clone of message for local node */
|
/* Prepare clone of message for local node */
|
||||||
clbuf = tipc_msg_reassemble(buf);
|
skb = tipc_msg_reassemble(list);
|
||||||
if (unlikely(!clbuf)) {
|
if (unlikely(!skb)) {
|
||||||
kfree_skb_list(buf);
|
__skb_queue_purge(list);
|
||||||
return -EHOSTUNREACH;
|
return -EHOSTUNREACH;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -423,11 +419,13 @@ int tipc_bclink_xmit(struct sk_buff *buf)
|
||||||
if (likely(bclink)) {
|
if (likely(bclink)) {
|
||||||
tipc_bclink_lock();
|
tipc_bclink_lock();
|
||||||
if (likely(bclink->bcast_nodes.count)) {
|
if (likely(bclink->bcast_nodes.count)) {
|
||||||
rc = __tipc_link_xmit(bcl, buf);
|
rc = __tipc_link_xmit(bcl, list);
|
||||||
if (likely(!rc)) {
|
if (likely(!rc)) {
|
||||||
|
u32 len = skb_queue_len(&bcl->outqueue);
|
||||||
|
|
||||||
bclink_set_last_sent();
|
bclink_set_last_sent();
|
||||||
bcl->stats.queue_sz_counts++;
|
bcl->stats.queue_sz_counts++;
|
||||||
bcl->stats.accu_queue_sz += bcl->out_queue_size;
|
bcl->stats.accu_queue_sz += len;
|
||||||
}
|
}
|
||||||
bc = 1;
|
bc = 1;
|
||||||
}
|
}
|
||||||
|
@ -435,13 +433,13 @@ int tipc_bclink_xmit(struct sk_buff *buf)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (unlikely(!bc))
|
if (unlikely(!bc))
|
||||||
kfree_skb_list(buf);
|
__skb_queue_purge(list);
|
||||||
|
|
||||||
/* Deliver message clone */
|
/* Deliver message clone */
|
||||||
if (likely(!rc))
|
if (likely(!rc))
|
||||||
tipc_sk_mcast_rcv(clbuf);
|
tipc_sk_mcast_rcv(skb);
|
||||||
else
|
else
|
||||||
kfree_skb(clbuf);
|
kfree_skb(skb);
|
||||||
|
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
@ -462,7 +460,6 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
|
||||||
* Unicast an ACK periodically, ensuring that
|
* Unicast an ACK periodically, ensuring that
|
||||||
* all nodes in the cluster don't ACK at the same time
|
* all nodes in the cluster don't ACK at the same time
|
||||||
*/
|
*/
|
||||||
|
|
||||||
if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
|
if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
|
||||||
tipc_link_proto_xmit(node->active_links[node->addr & 1],
|
tipc_link_proto_xmit(node->active_links[node->addr & 1],
|
||||||
STATE_MSG, 0, 0, 0, 0, 0);
|
STATE_MSG, 0, 0, 0, 0, 0);
|
||||||
|
@ -484,7 +481,6 @@ void tipc_bclink_rcv(struct sk_buff *buf)
|
||||||
int deferred = 0;
|
int deferred = 0;
|
||||||
|
|
||||||
/* Screen out unwanted broadcast messages */
|
/* Screen out unwanted broadcast messages */
|
||||||
|
|
||||||
if (msg_mc_netid(msg) != tipc_net_id)
|
if (msg_mc_netid(msg) != tipc_net_id)
|
||||||
goto exit;
|
goto exit;
|
||||||
|
|
||||||
|
@ -497,7 +493,6 @@ void tipc_bclink_rcv(struct sk_buff *buf)
|
||||||
goto unlock;
|
goto unlock;
|
||||||
|
|
||||||
/* Handle broadcast protocol message */
|
/* Handle broadcast protocol message */
|
||||||
|
|
||||||
if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
|
if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
|
||||||
if (msg_type(msg) != STATE_MSG)
|
if (msg_type(msg) != STATE_MSG)
|
||||||
goto unlock;
|
goto unlock;
|
||||||
|
@ -518,14 +513,12 @@ void tipc_bclink_rcv(struct sk_buff *buf)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Handle in-sequence broadcast message */
|
/* Handle in-sequence broadcast message */
|
||||||
|
|
||||||
seqno = msg_seqno(msg);
|
seqno = msg_seqno(msg);
|
||||||
next_in = mod(node->bclink.last_in + 1);
|
next_in = mod(node->bclink.last_in + 1);
|
||||||
|
|
||||||
if (likely(seqno == next_in)) {
|
if (likely(seqno == next_in)) {
|
||||||
receive:
|
receive:
|
||||||
/* Deliver message to destination */
|
/* Deliver message to destination */
|
||||||
|
|
||||||
if (likely(msg_isdata(msg))) {
|
if (likely(msg_isdata(msg))) {
|
||||||
tipc_bclink_lock();
|
tipc_bclink_lock();
|
||||||
bclink_accept_pkt(node, seqno);
|
bclink_accept_pkt(node, seqno);
|
||||||
|
@ -574,7 +567,6 @@ receive:
|
||||||
buf = NULL;
|
buf = NULL;
|
||||||
|
|
||||||
/* Determine new synchronization state */
|
/* Determine new synchronization state */
|
||||||
|
|
||||||
tipc_node_lock(node);
|
tipc_node_lock(node);
|
||||||
if (unlikely(!tipc_node_is_up(node)))
|
if (unlikely(!tipc_node_is_up(node)))
|
||||||
goto unlock;
|
goto unlock;
|
||||||
|
@ -582,33 +574,26 @@ receive:
|
||||||
if (node->bclink.last_in == node->bclink.last_sent)
|
if (node->bclink.last_in == node->bclink.last_sent)
|
||||||
goto unlock;
|
goto unlock;
|
||||||
|
|
||||||
if (!node->bclink.deferred_head) {
|
if (skb_queue_empty(&node->bclink.deferred_queue)) {
|
||||||
node->bclink.oos_state = 1;
|
node->bclink.oos_state = 1;
|
||||||
goto unlock;
|
goto unlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
msg = buf_msg(node->bclink.deferred_head);
|
msg = buf_msg(skb_peek(&node->bclink.deferred_queue));
|
||||||
seqno = msg_seqno(msg);
|
seqno = msg_seqno(msg);
|
||||||
next_in = mod(next_in + 1);
|
next_in = mod(next_in + 1);
|
||||||
if (seqno != next_in)
|
if (seqno != next_in)
|
||||||
goto unlock;
|
goto unlock;
|
||||||
|
|
||||||
/* Take in-sequence message from deferred queue & deliver it */
|
/* Take in-sequence message from deferred queue & deliver it */
|
||||||
|
buf = __skb_dequeue(&node->bclink.deferred_queue);
|
||||||
buf = node->bclink.deferred_head;
|
|
||||||
node->bclink.deferred_head = buf->next;
|
|
||||||
buf->next = NULL;
|
|
||||||
node->bclink.deferred_size--;
|
|
||||||
goto receive;
|
goto receive;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Handle out-of-sequence broadcast message */
|
/* Handle out-of-sequence broadcast message */
|
||||||
|
|
||||||
if (less(next_in, seqno)) {
|
if (less(next_in, seqno)) {
|
||||||
deferred = tipc_link_defer_pkt(&node->bclink.deferred_head,
|
deferred = tipc_link_defer_pkt(&node->bclink.deferred_queue,
|
||||||
&node->bclink.deferred_tail,
|
|
||||||
buf);
|
buf);
|
||||||
node->bclink.deferred_size += deferred;
|
|
||||||
bclink_update_last_sent(node, seqno);
|
bclink_update_last_sent(node, seqno);
|
||||||
buf = NULL;
|
buf = NULL;
|
||||||
}
|
}
|
||||||
|
@ -963,6 +948,8 @@ int tipc_bclink_init(void)
|
||||||
sprintf(bcbearer->media.name, "tipc-broadcast");
|
sprintf(bcbearer->media.name, "tipc-broadcast");
|
||||||
|
|
||||||
spin_lock_init(&bclink->lock);
|
spin_lock_init(&bclink->lock);
|
||||||
|
__skb_queue_head_init(&bcl->outqueue);
|
||||||
|
__skb_queue_head_init(&bcl->deferred_queue);
|
||||||
__skb_queue_head_init(&bcl->waiting_sks);
|
__skb_queue_head_init(&bcl->waiting_sks);
|
||||||
bcl->next_out_no = 1;
|
bcl->next_out_no = 1;
|
||||||
spin_lock_init(&bclink->node.lock);
|
spin_lock_init(&bclink->node.lock);
|
||||||
|
|
|
@ -100,7 +100,7 @@ int tipc_bclink_reset_stats(void);
|
||||||
int tipc_bclink_set_queue_limits(u32 limit);
|
int tipc_bclink_set_queue_limits(u32 limit);
|
||||||
void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
|
void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
|
||||||
uint tipc_bclink_get_mtu(void);
|
uint tipc_bclink_get_mtu(void);
|
||||||
int tipc_bclink_xmit(struct sk_buff *buf);
|
int tipc_bclink_xmit(struct sk_buff_head *list);
|
||||||
void tipc_bclink_wakeup_users(void);
|
void tipc_bclink_wakeup_users(void);
|
||||||
int tipc_nl_add_bc_link(struct tipc_nl_msg *msg);
|
int tipc_nl_add_bc_link(struct tipc_nl_msg *msg);
|
||||||
|
|
||||||
|
|
|
@ -165,7 +165,7 @@ extern struct tipc_bearer __rcu *bearer_list[];
|
||||||
* TIPC routines available to supported media types
|
* TIPC routines available to supported media types
|
||||||
*/
|
*/
|
||||||
|
|
||||||
void tipc_rcv(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
|
void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *tb_ptr);
|
||||||
int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority);
|
int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority);
|
||||||
int tipc_disable_bearer(const char *name);
|
int tipc_disable_bearer(const char *name);
|
||||||
|
|
||||||
|
|
|
@ -192,6 +192,7 @@ struct tipc_skb_cb {
|
||||||
struct sk_buff *tail;
|
struct sk_buff *tail;
|
||||||
bool deferred;
|
bool deferred;
|
||||||
bool wakeup_pending;
|
bool wakeup_pending;
|
||||||
|
bool bundling;
|
||||||
u16 chain_sz;
|
u16 chain_sz;
|
||||||
u16 chain_imp;
|
u16 chain_imp;
|
||||||
};
|
};
|
||||||
|
|
514
net/tipc/link.c
514
net/tipc/link.c
|
@ -149,18 +149,6 @@ static void link_init_max_pkt(struct tipc_link *l_ptr)
|
||||||
l_ptr->max_pkt_probes = 0;
|
l_ptr->max_pkt_probes = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static u32 link_next_sent(struct tipc_link *l_ptr)
|
|
||||||
{
|
|
||||||
if (l_ptr->next_out)
|
|
||||||
return buf_seqno(l_ptr->next_out);
|
|
||||||
return mod(l_ptr->next_out_no);
|
|
||||||
}
|
|
||||||
|
|
||||||
static u32 link_last_sent(struct tipc_link *l_ptr)
|
|
||||||
{
|
|
||||||
return mod(link_next_sent(l_ptr) - 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Simple non-static link routines (i.e. referenced outside this file)
|
* Simple non-static link routines (i.e. referenced outside this file)
|
||||||
*/
|
*/
|
||||||
|
@ -183,14 +171,17 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
|
||||||
*/
|
*/
|
||||||
static void link_timeout(struct tipc_link *l_ptr)
|
static void link_timeout(struct tipc_link *l_ptr)
|
||||||
{
|
{
|
||||||
|
struct sk_buff *skb;
|
||||||
|
|
||||||
tipc_node_lock(l_ptr->owner);
|
tipc_node_lock(l_ptr->owner);
|
||||||
|
|
||||||
/* update counters used in statistical profiling of send traffic */
|
/* update counters used in statistical profiling of send traffic */
|
||||||
l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
|
l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->outqueue);
|
||||||
l_ptr->stats.queue_sz_counts++;
|
l_ptr->stats.queue_sz_counts++;
|
||||||
|
|
||||||
if (l_ptr->first_out) {
|
skb = skb_peek(&l_ptr->outqueue);
|
||||||
struct tipc_msg *msg = buf_msg(l_ptr->first_out);
|
if (skb) {
|
||||||
|
struct tipc_msg *msg = buf_msg(skb);
|
||||||
u32 length = msg_size(msg);
|
u32 length = msg_size(msg);
|
||||||
|
|
||||||
if ((msg_user(msg) == MSG_FRAGMENTER) &&
|
if ((msg_user(msg) == MSG_FRAGMENTER) &&
|
||||||
|
@ -218,11 +209,10 @@ static void link_timeout(struct tipc_link *l_ptr)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* do all other link processing performed on a periodic basis */
|
/* do all other link processing performed on a periodic basis */
|
||||||
|
|
||||||
link_state_event(l_ptr, TIMEOUT_EVT);
|
link_state_event(l_ptr, TIMEOUT_EVT);
|
||||||
|
|
||||||
if (l_ptr->next_out)
|
if (l_ptr->next_out)
|
||||||
tipc_link_push_queue(l_ptr);
|
tipc_link_push_packets(l_ptr);
|
||||||
|
|
||||||
tipc_node_unlock(l_ptr->owner);
|
tipc_node_unlock(l_ptr->owner);
|
||||||
}
|
}
|
||||||
|
@ -301,6 +291,8 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
|
||||||
link_init_max_pkt(l_ptr);
|
link_init_max_pkt(l_ptr);
|
||||||
|
|
||||||
l_ptr->next_out_no = 1;
|
l_ptr->next_out_no = 1;
|
||||||
|
__skb_queue_head_init(&l_ptr->outqueue);
|
||||||
|
__skb_queue_head_init(&l_ptr->deferred_queue);
|
||||||
__skb_queue_head_init(&l_ptr->waiting_sks);
|
__skb_queue_head_init(&l_ptr->waiting_sks);
|
||||||
|
|
||||||
link_reset_statistics(l_ptr);
|
link_reset_statistics(l_ptr);
|
||||||
|
@ -379,29 +371,18 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport,
|
||||||
*/
|
*/
|
||||||
static void link_prepare_wakeup(struct tipc_link *link)
|
static void link_prepare_wakeup(struct tipc_link *link)
|
||||||
{
|
{
|
||||||
struct sk_buff_head *wq = &link->waiting_sks;
|
uint pend_qsz = skb_queue_len(&link->outqueue);
|
||||||
struct sk_buff *buf;
|
struct sk_buff *skb, *tmp;
|
||||||
uint pend_qsz = link->out_queue_size;
|
|
||||||
|
|
||||||
for (buf = skb_peek(wq); buf; buf = skb_peek(wq)) {
|
skb_queue_walk_safe(&link->waiting_sks, skb, tmp) {
|
||||||
if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(buf)->chain_imp])
|
if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
|
||||||
break;
|
break;
|
||||||
pend_qsz += TIPC_SKB_CB(buf)->chain_sz;
|
pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
|
||||||
__skb_queue_tail(&link->owner->waiting_sks, __skb_dequeue(wq));
|
__skb_unlink(skb, &link->waiting_sks);
|
||||||
|
__skb_queue_tail(&link->owner->waiting_sks, skb);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* link_release_outqueue - purge link's outbound message queue
|
|
||||||
* @l_ptr: pointer to link
|
|
||||||
*/
|
|
||||||
static void link_release_outqueue(struct tipc_link *l_ptr)
|
|
||||||
{
|
|
||||||
kfree_skb_list(l_ptr->first_out);
|
|
||||||
l_ptr->first_out = NULL;
|
|
||||||
l_ptr->out_queue_size = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* tipc_link_reset_fragments - purge link's inbound message fragments queue
|
* tipc_link_reset_fragments - purge link's inbound message fragments queue
|
||||||
* @l_ptr: pointer to link
|
* @l_ptr: pointer to link
|
||||||
|
@ -418,11 +399,9 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr)
|
||||||
*/
|
*/
|
||||||
void tipc_link_purge_queues(struct tipc_link *l_ptr)
|
void tipc_link_purge_queues(struct tipc_link *l_ptr)
|
||||||
{
|
{
|
||||||
kfree_skb_list(l_ptr->oldest_deferred_in);
|
__skb_queue_purge(&l_ptr->deferred_queue);
|
||||||
kfree_skb_list(l_ptr->first_out);
|
__skb_queue_purge(&l_ptr->outqueue);
|
||||||
tipc_link_reset_fragments(l_ptr);
|
tipc_link_reset_fragments(l_ptr);
|
||||||
kfree_skb(l_ptr->proto_msg_queue);
|
|
||||||
l_ptr->proto_msg_queue = NULL;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void tipc_link_reset(struct tipc_link *l_ptr)
|
void tipc_link_reset(struct tipc_link *l_ptr)
|
||||||
|
@ -454,25 +433,16 @@ void tipc_link_reset(struct tipc_link *l_ptr)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Clean up all queues: */
|
/* Clean up all queues: */
|
||||||
link_release_outqueue(l_ptr);
|
__skb_queue_purge(&l_ptr->outqueue);
|
||||||
kfree_skb(l_ptr->proto_msg_queue);
|
__skb_queue_purge(&l_ptr->deferred_queue);
|
||||||
l_ptr->proto_msg_queue = NULL;
|
|
||||||
kfree_skb_list(l_ptr->oldest_deferred_in);
|
|
||||||
if (!skb_queue_empty(&l_ptr->waiting_sks)) {
|
if (!skb_queue_empty(&l_ptr->waiting_sks)) {
|
||||||
skb_queue_splice_init(&l_ptr->waiting_sks, &owner->waiting_sks);
|
skb_queue_splice_init(&l_ptr->waiting_sks, &owner->waiting_sks);
|
||||||
owner->action_flags |= TIPC_WAKEUP_USERS;
|
owner->action_flags |= TIPC_WAKEUP_USERS;
|
||||||
}
|
}
|
||||||
l_ptr->retransm_queue_head = 0;
|
|
||||||
l_ptr->retransm_queue_size = 0;
|
|
||||||
l_ptr->last_out = NULL;
|
|
||||||
l_ptr->first_out = NULL;
|
|
||||||
l_ptr->next_out = NULL;
|
l_ptr->next_out = NULL;
|
||||||
l_ptr->unacked_window = 0;
|
l_ptr->unacked_window = 0;
|
||||||
l_ptr->checkpoint = 1;
|
l_ptr->checkpoint = 1;
|
||||||
l_ptr->next_out_no = 1;
|
l_ptr->next_out_no = 1;
|
||||||
l_ptr->deferred_inqueue_sz = 0;
|
|
||||||
l_ptr->oldest_deferred_in = NULL;
|
|
||||||
l_ptr->newest_deferred_in = NULL;
|
|
||||||
l_ptr->fsm_msg_cnt = 0;
|
l_ptr->fsm_msg_cnt = 0;
|
||||||
l_ptr->stale_count = 0;
|
l_ptr->stale_count = 0;
|
||||||
link_reset_statistics(l_ptr);
|
link_reset_statistics(l_ptr);
|
||||||
|
@ -694,9 +664,10 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
|
||||||
* - For all other messages we discard the buffer and return -EHOSTUNREACH
|
* - For all other messages we discard the buffer and return -EHOSTUNREACH
|
||||||
* - For TIPC internal messages we also reset the link
|
* - For TIPC internal messages we also reset the link
|
||||||
*/
|
*/
|
||||||
static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf)
|
static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
|
||||||
{
|
{
|
||||||
struct tipc_msg *msg = buf_msg(buf);
|
struct sk_buff *skb = skb_peek(list);
|
||||||
|
struct tipc_msg *msg = buf_msg(skb);
|
||||||
uint imp = tipc_msg_tot_importance(msg);
|
uint imp = tipc_msg_tot_importance(msg);
|
||||||
u32 oport = msg_tot_origport(msg);
|
u32 oport = msg_tot_origport(msg);
|
||||||
|
|
||||||
|
@ -709,30 +680,30 @@ static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf)
|
||||||
goto drop;
|
goto drop;
|
||||||
if (unlikely(msg_reroute_cnt(msg)))
|
if (unlikely(msg_reroute_cnt(msg)))
|
||||||
goto drop;
|
goto drop;
|
||||||
if (TIPC_SKB_CB(buf)->wakeup_pending)
|
if (TIPC_SKB_CB(skb)->wakeup_pending)
|
||||||
return -ELINKCONG;
|
return -ELINKCONG;
|
||||||
if (link_schedule_user(link, oport, TIPC_SKB_CB(buf)->chain_sz, imp))
|
if (link_schedule_user(link, oport, skb_queue_len(list), imp))
|
||||||
return -ELINKCONG;
|
return -ELINKCONG;
|
||||||
drop:
|
drop:
|
||||||
kfree_skb_list(buf);
|
__skb_queue_purge(list);
|
||||||
return -EHOSTUNREACH;
|
return -EHOSTUNREACH;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
|
* __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
|
||||||
* @link: link to use
|
* @link: link to use
|
||||||
* @buf: chain of buffers containing message
|
* @list: chain of buffers containing message
|
||||||
|
*
|
||||||
* Consumes the buffer chain, except when returning -ELINKCONG
|
* Consumes the buffer chain, except when returning -ELINKCONG
|
||||||
* Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
|
* Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
|
||||||
* user data messages) or -EHOSTUNREACH (all other messages/senders)
|
* user data messages) or -EHOSTUNREACH (all other messages/senders)
|
||||||
* Only the socket functions tipc_send_stream() and tipc_send_packet() need
|
* Only the socket functions tipc_send_stream() and tipc_send_packet() need
|
||||||
* to act on the return value, since they may need to do more send attempts.
|
* to act on the return value, since they may need to do more send attempts.
|
||||||
*/
|
*/
|
||||||
int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf)
|
int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list)
|
||||||
{
|
{
|
||||||
struct tipc_msg *msg = buf_msg(buf);
|
struct tipc_msg *msg = buf_msg(skb_peek(list));
|
||||||
uint psz = msg_size(msg);
|
uint psz = msg_size(msg);
|
||||||
uint qsz = link->out_queue_size;
|
|
||||||
uint sndlim = link->queue_limit[0];
|
uint sndlim = link->queue_limit[0];
|
||||||
uint imp = tipc_msg_tot_importance(msg);
|
uint imp = tipc_msg_tot_importance(msg);
|
||||||
uint mtu = link->max_pkt;
|
uint mtu = link->max_pkt;
|
||||||
|
@ -740,71 +711,83 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf)
|
||||||
uint seqno = link->next_out_no;
|
uint seqno = link->next_out_no;
|
||||||
uint bc_last_in = link->owner->bclink.last_in;
|
uint bc_last_in = link->owner->bclink.last_in;
|
||||||
struct tipc_media_addr *addr = &link->media_addr;
|
struct tipc_media_addr *addr = &link->media_addr;
|
||||||
struct sk_buff *next = buf->next;
|
struct sk_buff_head *outqueue = &link->outqueue;
|
||||||
|
struct sk_buff *skb, *tmp;
|
||||||
|
|
||||||
/* Match queue limits against msg importance: */
|
/* Match queue limits against msg importance: */
|
||||||
if (unlikely(qsz >= link->queue_limit[imp]))
|
if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp]))
|
||||||
return tipc_link_cong(link, buf);
|
return tipc_link_cong(link, list);
|
||||||
|
|
||||||
/* Has valid packet limit been used ? */
|
/* Has valid packet limit been used ? */
|
||||||
if (unlikely(psz > mtu)) {
|
if (unlikely(psz > mtu)) {
|
||||||
kfree_skb_list(buf);
|
__skb_queue_purge(list);
|
||||||
return -EMSGSIZE;
|
return -EMSGSIZE;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Prepare each packet for sending, and add to outqueue: */
|
/* Prepare each packet for sending, and add to outqueue: */
|
||||||
while (buf) {
|
skb_queue_walk_safe(list, skb, tmp) {
|
||||||
next = buf->next;
|
__skb_unlink(skb, list);
|
||||||
msg = buf_msg(buf);
|
msg = buf_msg(skb);
|
||||||
msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
|
msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
|
||||||
msg_set_bcast_ack(msg, bc_last_in);
|
msg_set_bcast_ack(msg, bc_last_in);
|
||||||
|
|
||||||
if (!link->first_out) {
|
if (skb_queue_len(outqueue) < sndlim) {
|
||||||
link->first_out = buf;
|
__skb_queue_tail(outqueue, skb);
|
||||||
} else if (qsz < sndlim) {
|
tipc_bearer_send(link->bearer_id, skb, addr);
|
||||||
link->last_out->next = buf;
|
link->next_out = NULL;
|
||||||
} else if (tipc_msg_bundle(link->last_out, buf, mtu)) {
|
link->unacked_window = 0;
|
||||||
|
} else if (tipc_msg_bundle(outqueue, skb, mtu)) {
|
||||||
link->stats.sent_bundled++;
|
link->stats.sent_bundled++;
|
||||||
buf = next;
|
|
||||||
next = buf->next;
|
|
||||||
continue;
|
continue;
|
||||||
} else if (tipc_msg_make_bundle(&buf, mtu, link->addr)) {
|
} else if (tipc_msg_make_bundle(outqueue, skb, mtu,
|
||||||
|
link->addr)) {
|
||||||
link->stats.sent_bundled++;
|
link->stats.sent_bundled++;
|
||||||
link->stats.sent_bundles++;
|
link->stats.sent_bundles++;
|
||||||
link->last_out->next = buf;
|
|
||||||
if (!link->next_out)
|
if (!link->next_out)
|
||||||
link->next_out = buf;
|
link->next_out = skb_peek_tail(outqueue);
|
||||||
} else {
|
} else {
|
||||||
link->last_out->next = buf;
|
__skb_queue_tail(outqueue, skb);
|
||||||
if (!link->next_out)
|
if (!link->next_out)
|
||||||
link->next_out = buf;
|
link->next_out = skb;
|
||||||
}
|
|
||||||
|
|
||||||
/* Send packet if possible: */
|
|
||||||
if (likely(++qsz <= sndlim)) {
|
|
||||||
tipc_bearer_send(link->bearer_id, buf, addr);
|
|
||||||
link->next_out = next;
|
|
||||||
link->unacked_window = 0;
|
|
||||||
}
|
}
|
||||||
seqno++;
|
seqno++;
|
||||||
link->last_out = buf;
|
|
||||||
buf = next;
|
|
||||||
}
|
}
|
||||||
link->next_out_no = seqno;
|
link->next_out_no = seqno;
|
||||||
link->out_queue_size = qsz;
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
|
||||||
|
{
|
||||||
|
__skb_queue_head_init(list);
|
||||||
|
__skb_queue_tail(list, skb);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
|
||||||
|
{
|
||||||
|
struct sk_buff_head head;
|
||||||
|
|
||||||
|
skb2list(skb, &head);
|
||||||
|
return __tipc_link_xmit(link, &head);
|
||||||
|
}
|
||||||
|
|
||||||
|
int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector)
|
||||||
|
{
|
||||||
|
struct sk_buff_head head;
|
||||||
|
|
||||||
|
skb2list(skb, &head);
|
||||||
|
return tipc_link_xmit(&head, dnode, selector);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* tipc_link_xmit() is the general link level function for message sending
|
* tipc_link_xmit() is the general link level function for message sending
|
||||||
* @buf: chain of buffers containing message
|
* @list: chain of buffers containing message
|
||||||
* @dsz: amount of user data to be sent
|
* @dsz: amount of user data to be sent
|
||||||
* @dnode: address of destination node
|
* @dnode: address of destination node
|
||||||
* @selector: a number used for deterministic link selection
|
* @selector: a number used for deterministic link selection
|
||||||
* Consumes the buffer chain, except when returning -ELINKCONG
|
* Consumes the buffer chain, except when returning -ELINKCONG
|
||||||
* Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
|
* Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
|
||||||
*/
|
*/
|
||||||
int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
|
int tipc_link_xmit(struct sk_buff_head *list, u32 dnode, u32 selector)
|
||||||
{
|
{
|
||||||
struct tipc_link *link = NULL;
|
struct tipc_link *link = NULL;
|
||||||
struct tipc_node *node;
|
struct tipc_node *node;
|
||||||
|
@ -815,17 +798,22 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
|
||||||
tipc_node_lock(node);
|
tipc_node_lock(node);
|
||||||
link = node->active_links[selector & 1];
|
link = node->active_links[selector & 1];
|
||||||
if (link)
|
if (link)
|
||||||
rc = __tipc_link_xmit(link, buf);
|
rc = __tipc_link_xmit(link, list);
|
||||||
tipc_node_unlock(node);
|
tipc_node_unlock(node);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (link)
|
if (link)
|
||||||
return rc;
|
return rc;
|
||||||
|
|
||||||
if (likely(in_own_node(dnode)))
|
if (likely(in_own_node(dnode))) {
|
||||||
return tipc_sk_rcv(buf);
|
/* As a node local message chain never contains more than one
|
||||||
|
* buffer, we just need to dequeue one SKB buffer from the
|
||||||
|
* head list.
|
||||||
|
*/
|
||||||
|
return tipc_sk_rcv(__skb_dequeue(list));
|
||||||
|
}
|
||||||
|
__skb_queue_purge(list);
|
||||||
|
|
||||||
kfree_skb_list(buf);
|
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -839,17 +827,17 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
|
||||||
*/
|
*/
|
||||||
static void tipc_link_sync_xmit(struct tipc_link *link)
|
static void tipc_link_sync_xmit(struct tipc_link *link)
|
||||||
{
|
{
|
||||||
struct sk_buff *buf;
|
struct sk_buff *skb;
|
||||||
struct tipc_msg *msg;
|
struct tipc_msg *msg;
|
||||||
|
|
||||||
buf = tipc_buf_acquire(INT_H_SIZE);
|
skb = tipc_buf_acquire(INT_H_SIZE);
|
||||||
if (!buf)
|
if (!skb)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
msg = buf_msg(buf);
|
msg = buf_msg(skb);
|
||||||
tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr);
|
tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr);
|
||||||
msg_set_last_bcast(msg, link->owner->bclink.acked);
|
msg_set_last_bcast(msg, link->owner->bclink.acked);
|
||||||
__tipc_link_xmit(link, buf);
|
__tipc_link_xmit_skb(link, skb);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -869,85 +857,46 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
|
||||||
kfree_skb(buf);
|
kfree_skb(buf);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
|
||||||
* tipc_link_push_packet: Push one unsent packet to the media
|
const struct sk_buff *skb)
|
||||||
*/
|
|
||||||
static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
|
|
||||||
{
|
{
|
||||||
struct sk_buff *buf = l_ptr->first_out;
|
if (skb_queue_is_last(list, skb))
|
||||||
u32 r_q_size = l_ptr->retransm_queue_size;
|
return NULL;
|
||||||
u32 r_q_head = l_ptr->retransm_queue_head;
|
return skb->next;
|
||||||
|
}
|
||||||
|
|
||||||
/* Step to position where retransmission failed, if any, */
|
/*
|
||||||
/* consider that buffers may have been released in meantime */
|
* tipc_link_push_packets - push unsent packets to bearer
|
||||||
if (r_q_size && buf) {
|
*
|
||||||
u32 last = lesser(mod(r_q_head + r_q_size),
|
* Push out the unsent messages of a link where congestion
|
||||||
link_last_sent(l_ptr));
|
* has abated. Node is locked.
|
||||||
u32 first = buf_seqno(buf);
|
*
|
||||||
|
* Called with node locked
|
||||||
|
*/
|
||||||
|
void tipc_link_push_packets(struct tipc_link *l_ptr)
|
||||||
|
{
|
||||||
|
struct sk_buff_head *outqueue = &l_ptr->outqueue;
|
||||||
|
struct sk_buff *skb = l_ptr->next_out;
|
||||||
|
struct tipc_msg *msg;
|
||||||
|
u32 next, first;
|
||||||
|
|
||||||
while (buf && less(first, r_q_head)) {
|
skb_queue_walk_from(outqueue, skb) {
|
||||||
first = mod(first + 1);
|
msg = buf_msg(skb);
|
||||||
buf = buf->next;
|
next = msg_seqno(msg);
|
||||||
}
|
first = buf_seqno(skb_peek(outqueue));
|
||||||
l_ptr->retransm_queue_head = r_q_head = first;
|
|
||||||
l_ptr->retransm_queue_size = r_q_size = mod(last - first);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Continue retransmission now, if there is anything: */
|
|
||||||
if (r_q_size && buf) {
|
|
||||||
msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
|
|
||||||
msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
|
|
||||||
tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
|
|
||||||
l_ptr->retransm_queue_head = mod(++r_q_head);
|
|
||||||
l_ptr->retransm_queue_size = --r_q_size;
|
|
||||||
l_ptr->stats.retransmitted++;
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Send deferred protocol message, if any: */
|
|
||||||
buf = l_ptr->proto_msg_queue;
|
|
||||||
if (buf) {
|
|
||||||
msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
|
|
||||||
msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
|
|
||||||
tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
|
|
||||||
l_ptr->unacked_window = 0;
|
|
||||||
kfree_skb(buf);
|
|
||||||
l_ptr->proto_msg_queue = NULL;
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Send one deferred data message, if send window not full: */
|
|
||||||
buf = l_ptr->next_out;
|
|
||||||
if (buf) {
|
|
||||||
struct tipc_msg *msg = buf_msg(buf);
|
|
||||||
u32 next = msg_seqno(msg);
|
|
||||||
u32 first = buf_seqno(l_ptr->first_out);
|
|
||||||
|
|
||||||
if (mod(next - first) < l_ptr->queue_limit[0]) {
|
if (mod(next - first) < l_ptr->queue_limit[0]) {
|
||||||
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
|
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
|
||||||
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
|
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
|
||||||
tipc_bearer_send(l_ptr->bearer_id, buf,
|
|
||||||
&l_ptr->media_addr);
|
|
||||||
if (msg_user(msg) == MSG_BUNDLER)
|
if (msg_user(msg) == MSG_BUNDLER)
|
||||||
msg_set_type(msg, BUNDLE_CLOSED);
|
TIPC_SKB_CB(skb)->bundling = false;
|
||||||
l_ptr->next_out = buf->next;
|
tipc_bearer_send(l_ptr->bearer_id, skb,
|
||||||
return 0;
|
&l_ptr->media_addr);
|
||||||
|
l_ptr->next_out = tipc_skb_queue_next(outqueue, skb);
|
||||||
|
} else {
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* push_queue(): push out the unsent messages of a link where
|
|
||||||
* congestion has abated. Node is locked
|
|
||||||
*/
|
|
||||||
void tipc_link_push_queue(struct tipc_link *l_ptr)
|
|
||||||
{
|
|
||||||
u32 res;
|
|
||||||
|
|
||||||
do {
|
|
||||||
res = tipc_link_push_packet(l_ptr);
|
|
||||||
} while (!res);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void tipc_link_reset_all(struct tipc_node *node)
|
void tipc_link_reset_all(struct tipc_node *node)
|
||||||
|
@ -1011,20 +960,20 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
|
void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
|
||||||
u32 retransmits)
|
u32 retransmits)
|
||||||
{
|
{
|
||||||
struct tipc_msg *msg;
|
struct tipc_msg *msg;
|
||||||
|
|
||||||
if (!buf)
|
if (!skb)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
msg = buf_msg(buf);
|
msg = buf_msg(skb);
|
||||||
|
|
||||||
/* Detect repeated retransmit failures */
|
/* Detect repeated retransmit failures */
|
||||||
if (l_ptr->last_retransmitted == msg_seqno(msg)) {
|
if (l_ptr->last_retransmitted == msg_seqno(msg)) {
|
||||||
if (++l_ptr->stale_count > 100) {
|
if (++l_ptr->stale_count > 100) {
|
||||||
link_retransmit_failure(l_ptr, buf);
|
link_retransmit_failure(l_ptr, skb);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -1032,38 +981,29 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
|
||||||
l_ptr->stale_count = 1;
|
l_ptr->stale_count = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
while (retransmits && (buf != l_ptr->next_out) && buf) {
|
skb_queue_walk_from(&l_ptr->outqueue, skb) {
|
||||||
msg = buf_msg(buf);
|
if (!retransmits || skb == l_ptr->next_out)
|
||||||
|
break;
|
||||||
|
msg = buf_msg(skb);
|
||||||
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
|
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
|
||||||
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
|
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
|
||||||
tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
|
tipc_bearer_send(l_ptr->bearer_id, skb, &l_ptr->media_addr);
|
||||||
buf = buf->next;
|
|
||||||
retransmits--;
|
retransmits--;
|
||||||
l_ptr->stats.retransmitted++;
|
l_ptr->stats.retransmitted++;
|
||||||
}
|
}
|
||||||
|
|
||||||
l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
static void link_retrieve_defq(struct tipc_link *link,
|
||||||
* link_insert_deferred_queue - insert deferred messages back into receive chain
|
struct sk_buff_head *list)
|
||||||
*/
|
|
||||||
static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
|
|
||||||
struct sk_buff *buf)
|
|
||||||
{
|
{
|
||||||
u32 seq_no;
|
u32 seq_no;
|
||||||
|
|
||||||
if (l_ptr->oldest_deferred_in == NULL)
|
if (skb_queue_empty(&link->deferred_queue))
|
||||||
return buf;
|
return;
|
||||||
|
|
||||||
seq_no = buf_seqno(l_ptr->oldest_deferred_in);
|
seq_no = buf_seqno(skb_peek(&link->deferred_queue));
|
||||||
if (seq_no == mod(l_ptr->next_in_no)) {
|
if (seq_no == mod(link->next_in_no))
|
||||||
l_ptr->newest_deferred_in->next = buf;
|
skb_queue_splice_tail_init(&link->deferred_queue, list);
|
||||||
buf = l_ptr->oldest_deferred_in;
|
|
||||||
l_ptr->oldest_deferred_in = NULL;
|
|
||||||
l_ptr->deferred_inqueue_sz = 0;
|
|
||||||
}
|
|
||||||
return buf;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -1123,43 +1063,42 @@ static int link_recv_buf_validate(struct sk_buff *buf)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* tipc_rcv - process TIPC packets/messages arriving from off-node
|
* tipc_rcv - process TIPC packets/messages arriving from off-node
|
||||||
* @head: pointer to message buffer chain
|
* @skb: TIPC packet
|
||||||
* @b_ptr: pointer to bearer message arrived on
|
* @b_ptr: pointer to bearer message arrived on
|
||||||
*
|
*
|
||||||
* Invoked with no locks held. Bearer pointer must point to a valid bearer
|
* Invoked with no locks held. Bearer pointer must point to a valid bearer
|
||||||
* structure (i.e. cannot be NULL), but bearer can be inactive.
|
* structure (i.e. cannot be NULL), but bearer can be inactive.
|
||||||
*/
|
*/
|
||||||
void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
|
void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
|
||||||
{
|
{
|
||||||
while (head) {
|
struct sk_buff_head head;
|
||||||
struct tipc_node *n_ptr;
|
struct tipc_node *n_ptr;
|
||||||
struct tipc_link *l_ptr;
|
struct tipc_link *l_ptr;
|
||||||
struct sk_buff *crs;
|
struct sk_buff *skb1, *tmp;
|
||||||
struct sk_buff *buf = head;
|
struct tipc_msg *msg;
|
||||||
struct tipc_msg *msg;
|
u32 seq_no;
|
||||||
u32 seq_no;
|
u32 ackd;
|
||||||
u32 ackd;
|
u32 released;
|
||||||
u32 released = 0;
|
|
||||||
|
|
||||||
head = head->next;
|
skb2list(skb, &head);
|
||||||
buf->next = NULL;
|
|
||||||
|
|
||||||
|
while ((skb = __skb_dequeue(&head))) {
|
||||||
/* Ensure message is well-formed */
|
/* Ensure message is well-formed */
|
||||||
if (unlikely(!link_recv_buf_validate(buf)))
|
if (unlikely(!link_recv_buf_validate(skb)))
|
||||||
goto discard;
|
goto discard;
|
||||||
|
|
||||||
/* Ensure message data is a single contiguous unit */
|
/* Ensure message data is a single contiguous unit */
|
||||||
if (unlikely(skb_linearize(buf)))
|
if (unlikely(skb_linearize(skb)))
|
||||||
goto discard;
|
goto discard;
|
||||||
|
|
||||||
/* Handle arrival of a non-unicast link message */
|
/* Handle arrival of a non-unicast link message */
|
||||||
msg = buf_msg(buf);
|
msg = buf_msg(skb);
|
||||||
|
|
||||||
if (unlikely(msg_non_seq(msg))) {
|
if (unlikely(msg_non_seq(msg))) {
|
||||||
if (msg_user(msg) == LINK_CONFIG)
|
if (msg_user(msg) == LINK_CONFIG)
|
||||||
tipc_disc_rcv(buf, b_ptr);
|
tipc_disc_rcv(skb, b_ptr);
|
||||||
else
|
else
|
||||||
tipc_bclink_rcv(buf);
|
tipc_bclink_rcv(skb);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1198,22 +1137,19 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
|
||||||
if (n_ptr->bclink.recv_permitted)
|
if (n_ptr->bclink.recv_permitted)
|
||||||
tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
|
tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
|
||||||
|
|
||||||
crs = l_ptr->first_out;
|
released = 0;
|
||||||
while ((crs != l_ptr->next_out) &&
|
skb_queue_walk_safe(&l_ptr->outqueue, skb1, tmp) {
|
||||||
less_eq(buf_seqno(crs), ackd)) {
|
if (skb1 == l_ptr->next_out ||
|
||||||
struct sk_buff *next = crs->next;
|
more(buf_seqno(skb1), ackd))
|
||||||
kfree_skb(crs);
|
break;
|
||||||
crs = next;
|
__skb_unlink(skb1, &l_ptr->outqueue);
|
||||||
released++;
|
kfree_skb(skb1);
|
||||||
}
|
released = 1;
|
||||||
if (released) {
|
|
||||||
l_ptr->first_out = crs;
|
|
||||||
l_ptr->out_queue_size -= released;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Try sending any messages link endpoint has pending */
|
/* Try sending any messages link endpoint has pending */
|
||||||
if (unlikely(l_ptr->next_out))
|
if (unlikely(l_ptr->next_out))
|
||||||
tipc_link_push_queue(l_ptr);
|
tipc_link_push_packets(l_ptr);
|
||||||
|
|
||||||
if (released && !skb_queue_empty(&l_ptr->waiting_sks)) {
|
if (released && !skb_queue_empty(&l_ptr->waiting_sks)) {
|
||||||
link_prepare_wakeup(l_ptr);
|
link_prepare_wakeup(l_ptr);
|
||||||
|
@ -1223,8 +1159,8 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
|
||||||
/* Process the incoming packet */
|
/* Process the incoming packet */
|
||||||
if (unlikely(!link_working_working(l_ptr))) {
|
if (unlikely(!link_working_working(l_ptr))) {
|
||||||
if (msg_user(msg) == LINK_PROTOCOL) {
|
if (msg_user(msg) == LINK_PROTOCOL) {
|
||||||
tipc_link_proto_rcv(l_ptr, buf);
|
tipc_link_proto_rcv(l_ptr, skb);
|
||||||
head = link_insert_deferred_queue(l_ptr, head);
|
link_retrieve_defq(l_ptr, &head);
|
||||||
tipc_node_unlock(n_ptr);
|
tipc_node_unlock(n_ptr);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
@ -1234,8 +1170,7 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
|
||||||
|
|
||||||
if (link_working_working(l_ptr)) {
|
if (link_working_working(l_ptr)) {
|
||||||
/* Re-insert buffer in front of queue */
|
/* Re-insert buffer in front of queue */
|
||||||
buf->next = head;
|
__skb_queue_head(&head, skb);
|
||||||
head = buf;
|
|
||||||
tipc_node_unlock(n_ptr);
|
tipc_node_unlock(n_ptr);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
@ -1244,33 +1179,33 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
|
||||||
|
|
||||||
/* Link is now in state WORKING_WORKING */
|
/* Link is now in state WORKING_WORKING */
|
||||||
if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
|
if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
|
||||||
link_handle_out_of_seq_msg(l_ptr, buf);
|
link_handle_out_of_seq_msg(l_ptr, skb);
|
||||||
head = link_insert_deferred_queue(l_ptr, head);
|
link_retrieve_defq(l_ptr, &head);
|
||||||
tipc_node_unlock(n_ptr);
|
tipc_node_unlock(n_ptr);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
l_ptr->next_in_no++;
|
l_ptr->next_in_no++;
|
||||||
if (unlikely(l_ptr->oldest_deferred_in))
|
if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue)))
|
||||||
head = link_insert_deferred_queue(l_ptr, head);
|
link_retrieve_defq(l_ptr, &head);
|
||||||
|
|
||||||
if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
|
if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
|
||||||
l_ptr->stats.sent_acks++;
|
l_ptr->stats.sent_acks++;
|
||||||
tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
|
tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (tipc_link_prepare_input(l_ptr, &buf)) {
|
if (tipc_link_prepare_input(l_ptr, &skb)) {
|
||||||
tipc_node_unlock(n_ptr);
|
tipc_node_unlock(n_ptr);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
tipc_node_unlock(n_ptr);
|
tipc_node_unlock(n_ptr);
|
||||||
msg = buf_msg(buf);
|
|
||||||
if (tipc_link_input(l_ptr, buf) != 0)
|
if (tipc_link_input(l_ptr, skb) != 0)
|
||||||
goto discard;
|
goto discard;
|
||||||
continue;
|
continue;
|
||||||
unlock_discard:
|
unlock_discard:
|
||||||
tipc_node_unlock(n_ptr);
|
tipc_node_unlock(n_ptr);
|
||||||
discard:
|
discard:
|
||||||
kfree_skb(buf);
|
kfree_skb(skb);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1353,48 +1288,37 @@ static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf)
|
||||||
*
|
*
|
||||||
* Returns increase in queue length (i.e. 0 or 1)
|
* Returns increase in queue length (i.e. 0 or 1)
|
||||||
*/
|
*/
|
||||||
u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
|
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
|
||||||
struct sk_buff *buf)
|
|
||||||
{
|
{
|
||||||
struct sk_buff *queue_buf;
|
struct sk_buff *skb1;
|
||||||
struct sk_buff **prev;
|
u32 seq_no = buf_seqno(skb);
|
||||||
u32 seq_no = buf_seqno(buf);
|
|
||||||
|
|
||||||
buf->next = NULL;
|
|
||||||
|
|
||||||
/* Empty queue ? */
|
/* Empty queue ? */
|
||||||
if (*head == NULL) {
|
if (skb_queue_empty(list)) {
|
||||||
*head = *tail = buf;
|
__skb_queue_tail(list, skb);
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Last ? */
|
/* Last ? */
|
||||||
if (less(buf_seqno(*tail), seq_no)) {
|
if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
|
||||||
(*tail)->next = buf;
|
__skb_queue_tail(list, skb);
|
||||||
*tail = buf;
|
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Locate insertion point in queue, then insert; discard if duplicate */
|
/* Locate insertion point in queue, then insert; discard if duplicate */
|
||||||
prev = head;
|
skb_queue_walk(list, skb1) {
|
||||||
queue_buf = *head;
|
u32 curr_seqno = buf_seqno(skb1);
|
||||||
for (;;) {
|
|
||||||
u32 curr_seqno = buf_seqno(queue_buf);
|
|
||||||
|
|
||||||
if (seq_no == curr_seqno) {
|
if (seq_no == curr_seqno) {
|
||||||
kfree_skb(buf);
|
kfree_skb(skb);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (less(seq_no, curr_seqno))
|
if (less(seq_no, curr_seqno))
|
||||||
break;
|
break;
|
||||||
|
|
||||||
prev = &queue_buf->next;
|
|
||||||
queue_buf = queue_buf->next;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
buf->next = queue_buf;
|
__skb_queue_before(list, skb1, skb);
|
||||||
*prev = buf;
|
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1424,15 +1348,14 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
|
if (tipc_link_defer_pkt(&l_ptr->deferred_queue, buf)) {
|
||||||
&l_ptr->newest_deferred_in, buf)) {
|
|
||||||
l_ptr->deferred_inqueue_sz++;
|
|
||||||
l_ptr->stats.deferred_recv++;
|
l_ptr->stats.deferred_recv++;
|
||||||
TIPC_SKB_CB(buf)->deferred = true;
|
TIPC_SKB_CB(buf)->deferred = true;
|
||||||
if ((l_ptr->deferred_inqueue_sz % 16) == 1)
|
if ((skb_queue_len(&l_ptr->deferred_queue) % 16) == 1)
|
||||||
tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
|
tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
|
||||||
} else
|
} else {
|
||||||
l_ptr->stats.duplicates++;
|
l_ptr->stats.duplicates++;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1446,12 +1369,6 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
|
||||||
u32 msg_size = sizeof(l_ptr->proto_msg);
|
u32 msg_size = sizeof(l_ptr->proto_msg);
|
||||||
int r_flag;
|
int r_flag;
|
||||||
|
|
||||||
/* Discard any previous message that was deferred due to congestion */
|
|
||||||
if (l_ptr->proto_msg_queue) {
|
|
||||||
kfree_skb(l_ptr->proto_msg_queue);
|
|
||||||
l_ptr->proto_msg_queue = NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Don't send protocol message during link changeover */
|
/* Don't send protocol message during link changeover */
|
||||||
if (l_ptr->exp_msg_count)
|
if (l_ptr->exp_msg_count)
|
||||||
return;
|
return;
|
||||||
|
@ -1474,8 +1391,8 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
|
||||||
if (l_ptr->next_out)
|
if (l_ptr->next_out)
|
||||||
next_sent = buf_seqno(l_ptr->next_out);
|
next_sent = buf_seqno(l_ptr->next_out);
|
||||||
msg_set_next_sent(msg, next_sent);
|
msg_set_next_sent(msg, next_sent);
|
||||||
if (l_ptr->oldest_deferred_in) {
|
if (!skb_queue_empty(&l_ptr->deferred_queue)) {
|
||||||
u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
|
u32 rec = buf_seqno(skb_peek(&l_ptr->deferred_queue));
|
||||||
gap = mod(rec - mod(l_ptr->next_in_no));
|
gap = mod(rec - mod(l_ptr->next_in_no));
|
||||||
}
|
}
|
||||||
msg_set_seq_gap(msg, gap);
|
msg_set_seq_gap(msg, gap);
|
||||||
|
@ -1663,7 +1580,7 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
|
||||||
}
|
}
|
||||||
if (msg_seq_gap(msg)) {
|
if (msg_seq_gap(msg)) {
|
||||||
l_ptr->stats.recv_nacks++;
|
l_ptr->stats.recv_nacks++;
|
||||||
tipc_link_retransmit(l_ptr, l_ptr->first_out,
|
tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->outqueue),
|
||||||
msg_seq_gap(msg));
|
msg_seq_gap(msg));
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
|
@ -1682,7 +1599,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
|
||||||
u32 selector)
|
u32 selector)
|
||||||
{
|
{
|
||||||
struct tipc_link *tunnel;
|
struct tipc_link *tunnel;
|
||||||
struct sk_buff *buf;
|
struct sk_buff *skb;
|
||||||
u32 length = msg_size(msg);
|
u32 length = msg_size(msg);
|
||||||
|
|
||||||
tunnel = l_ptr->owner->active_links[selector & 1];
|
tunnel = l_ptr->owner->active_links[selector & 1];
|
||||||
|
@ -1691,14 +1608,14 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
msg_set_size(tunnel_hdr, length + INT_H_SIZE);
|
msg_set_size(tunnel_hdr, length + INT_H_SIZE);
|
||||||
buf = tipc_buf_acquire(length + INT_H_SIZE);
|
skb = tipc_buf_acquire(length + INT_H_SIZE);
|
||||||
if (!buf) {
|
if (!skb) {
|
||||||
pr_warn("%sunable to send tunnel msg\n", link_co_err);
|
pr_warn("%sunable to send tunnel msg\n", link_co_err);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
|
skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
|
||||||
skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
|
skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
|
||||||
__tipc_link_xmit(tunnel, buf);
|
__tipc_link_xmit_skb(tunnel, skb);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -1710,10 +1627,10 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
|
||||||
*/
|
*/
|
||||||
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
|
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
|
||||||
{
|
{
|
||||||
u32 msgcount = l_ptr->out_queue_size;
|
u32 msgcount = skb_queue_len(&l_ptr->outqueue);
|
||||||
struct sk_buff *crs = l_ptr->first_out;
|
|
||||||
struct tipc_link *tunnel = l_ptr->owner->active_links[0];
|
struct tipc_link *tunnel = l_ptr->owner->active_links[0];
|
||||||
struct tipc_msg tunnel_hdr;
|
struct tipc_msg tunnel_hdr;
|
||||||
|
struct sk_buff *skb;
|
||||||
int split_bundles;
|
int split_bundles;
|
||||||
|
|
||||||
if (!tunnel)
|
if (!tunnel)
|
||||||
|
@ -1724,14 +1641,12 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
|
||||||
msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
|
msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
|
||||||
msg_set_msgcnt(&tunnel_hdr, msgcount);
|
msg_set_msgcnt(&tunnel_hdr, msgcount);
|
||||||
|
|
||||||
if (!l_ptr->first_out) {
|
if (skb_queue_empty(&l_ptr->outqueue)) {
|
||||||
struct sk_buff *buf;
|
skb = tipc_buf_acquire(INT_H_SIZE);
|
||||||
|
if (skb) {
|
||||||
buf = tipc_buf_acquire(INT_H_SIZE);
|
skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
|
||||||
if (buf) {
|
|
||||||
skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
|
|
||||||
msg_set_size(&tunnel_hdr, INT_H_SIZE);
|
msg_set_size(&tunnel_hdr, INT_H_SIZE);
|
||||||
__tipc_link_xmit(tunnel, buf);
|
__tipc_link_xmit_skb(tunnel, skb);
|
||||||
} else {
|
} else {
|
||||||
pr_warn("%sunable to send changeover msg\n",
|
pr_warn("%sunable to send changeover msg\n",
|
||||||
link_co_err);
|
link_co_err);
|
||||||
|
@ -1742,8 +1657,8 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
|
||||||
split_bundles = (l_ptr->owner->active_links[0] !=
|
split_bundles = (l_ptr->owner->active_links[0] !=
|
||||||
l_ptr->owner->active_links[1]);
|
l_ptr->owner->active_links[1]);
|
||||||
|
|
||||||
while (crs) {
|
skb_queue_walk(&l_ptr->outqueue, skb) {
|
||||||
struct tipc_msg *msg = buf_msg(crs);
|
struct tipc_msg *msg = buf_msg(skb);
|
||||||
|
|
||||||
if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
|
if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
|
||||||
struct tipc_msg *m = msg_get_wrapped(msg);
|
struct tipc_msg *m = msg_get_wrapped(msg);
|
||||||
|
@ -1761,7 +1676,6 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
|
||||||
tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
|
tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
|
||||||
msg_link_selector(msg));
|
msg_link_selector(msg));
|
||||||
}
|
}
|
||||||
crs = crs->next;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1777,17 +1691,16 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
|
||||||
void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
|
void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
|
||||||
struct tipc_link *tunnel)
|
struct tipc_link *tunnel)
|
||||||
{
|
{
|
||||||
struct sk_buff *iter;
|
struct sk_buff *skb;
|
||||||
struct tipc_msg tunnel_hdr;
|
struct tipc_msg tunnel_hdr;
|
||||||
|
|
||||||
tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
|
tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
|
||||||
DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
|
DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
|
||||||
msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
|
msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue));
|
||||||
msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
|
msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
|
||||||
iter = l_ptr->first_out;
|
skb_queue_walk(&l_ptr->outqueue, skb) {
|
||||||
while (iter) {
|
struct sk_buff *outskb;
|
||||||
struct sk_buff *outbuf;
|
struct tipc_msg *msg = buf_msg(skb);
|
||||||
struct tipc_msg *msg = buf_msg(iter);
|
|
||||||
u32 length = msg_size(msg);
|
u32 length = msg_size(msg);
|
||||||
|
|
||||||
if (msg_user(msg) == MSG_BUNDLER)
|
if (msg_user(msg) == MSG_BUNDLER)
|
||||||
|
@ -1795,19 +1708,18 @@ void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
|
||||||
msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */
|
msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */
|
||||||
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
|
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
|
||||||
msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
|
msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
|
||||||
outbuf = tipc_buf_acquire(length + INT_H_SIZE);
|
outskb = tipc_buf_acquire(length + INT_H_SIZE);
|
||||||
if (outbuf == NULL) {
|
if (outskb == NULL) {
|
||||||
pr_warn("%sunable to send duplicate msg\n",
|
pr_warn("%sunable to send duplicate msg\n",
|
||||||
link_co_err);
|
link_co_err);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
|
skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE);
|
||||||
skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
|
skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data,
|
||||||
length);
|
length);
|
||||||
__tipc_link_xmit(tunnel, outbuf);
|
__tipc_link_xmit_skb(tunnel, outskb);
|
||||||
if (!tipc_link_is_up(l_ptr))
|
if (!tipc_link_is_up(l_ptr))
|
||||||
return;
|
return;
|
||||||
iter = iter->next;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -119,20 +119,13 @@ struct tipc_stats {
|
||||||
* @max_pkt: current maximum packet size for this link
|
* @max_pkt: current maximum packet size for this link
|
||||||
* @max_pkt_target: desired maximum packet size for this link
|
* @max_pkt_target: desired maximum packet size for this link
|
||||||
* @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target)
|
* @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target)
|
||||||
* @out_queue_size: # of messages in outbound message queue
|
* @outqueue: outbound message queue
|
||||||
* @first_out: ptr to first outbound message in queue
|
|
||||||
* @last_out: ptr to last outbound message in queue
|
|
||||||
* @next_out_no: next sequence number to use for outbound messages
|
* @next_out_no: next sequence number to use for outbound messages
|
||||||
* @last_retransmitted: sequence number of most recently retransmitted message
|
* @last_retransmitted: sequence number of most recently retransmitted message
|
||||||
* @stale_count: # of identical retransmit requests made by peer
|
* @stale_count: # of identical retransmit requests made by peer
|
||||||
* @next_in_no: next sequence number to expect for inbound messages
|
* @next_in_no: next sequence number to expect for inbound messages
|
||||||
* @deferred_inqueue_sz: # of messages in inbound message queue
|
* @deferred_queue: deferred queue saved OOS b'cast message received from node
|
||||||
* @oldest_deferred_in: ptr to first inbound message in queue
|
|
||||||
* @newest_deferred_in: ptr to last inbound message in queue
|
|
||||||
* @unacked_window: # of inbound messages rx'd without ack'ing back to peer
|
* @unacked_window: # of inbound messages rx'd without ack'ing back to peer
|
||||||
* @proto_msg_queue: ptr to (single) outbound control message
|
|
||||||
* @retransm_queue_size: number of messages to retransmit
|
|
||||||
* @retransm_queue_head: sequence number of first message to retransmit
|
|
||||||
* @next_out: ptr to first unsent outbound message in queue
|
* @next_out: ptr to first unsent outbound message in queue
|
||||||
* @waiting_sks: linked list of sockets waiting for link congestion to abate
|
* @waiting_sks: linked list of sockets waiting for link congestion to abate
|
||||||
* @long_msg_seq_no: next identifier to use for outbound fragmented messages
|
* @long_msg_seq_no: next identifier to use for outbound fragmented messages
|
||||||
|
@ -176,24 +169,17 @@ struct tipc_link {
|
||||||
u32 max_pkt_probes;
|
u32 max_pkt_probes;
|
||||||
|
|
||||||
/* Sending */
|
/* Sending */
|
||||||
u32 out_queue_size;
|
struct sk_buff_head outqueue;
|
||||||
struct sk_buff *first_out;
|
|
||||||
struct sk_buff *last_out;
|
|
||||||
u32 next_out_no;
|
u32 next_out_no;
|
||||||
u32 last_retransmitted;
|
u32 last_retransmitted;
|
||||||
u32 stale_count;
|
u32 stale_count;
|
||||||
|
|
||||||
/* Reception */
|
/* Reception */
|
||||||
u32 next_in_no;
|
u32 next_in_no;
|
||||||
u32 deferred_inqueue_sz;
|
struct sk_buff_head deferred_queue;
|
||||||
struct sk_buff *oldest_deferred_in;
|
|
||||||
struct sk_buff *newest_deferred_in;
|
|
||||||
u32 unacked_window;
|
u32 unacked_window;
|
||||||
|
|
||||||
/* Congestion handling */
|
/* Congestion handling */
|
||||||
struct sk_buff *proto_msg_queue;
|
|
||||||
u32 retransm_queue_size;
|
|
||||||
u32 retransm_queue_head;
|
|
||||||
struct sk_buff *next_out;
|
struct sk_buff *next_out;
|
||||||
struct sk_buff_head waiting_sks;
|
struct sk_buff_head waiting_sks;
|
||||||
|
|
||||||
|
@ -227,18 +213,20 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
|
||||||
void tipc_link_reset_all(struct tipc_node *node);
|
void tipc_link_reset_all(struct tipc_node *node);
|
||||||
void tipc_link_reset(struct tipc_link *l_ptr);
|
void tipc_link_reset(struct tipc_link *l_ptr);
|
||||||
void tipc_link_reset_list(unsigned int bearer_id);
|
void tipc_link_reset_list(unsigned int bearer_id);
|
||||||
int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector);
|
int tipc_link_xmit_skb(struct sk_buff *skb, u32 dest, u32 selector);
|
||||||
int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf);
|
int tipc_link_xmit(struct sk_buff_head *list, u32 dest, u32 selector);
|
||||||
|
int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list);
|
||||||
u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
|
u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
|
||||||
void tipc_link_bundle_rcv(struct sk_buff *buf);
|
void tipc_link_bundle_rcv(struct sk_buff *buf);
|
||||||
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
|
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
|
||||||
u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
|
u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
|
||||||
void tipc_link_push_queue(struct tipc_link *l_ptr);
|
void tipc_link_push_packets(struct tipc_link *l_ptr);
|
||||||
u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
|
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf);
|
||||||
struct sk_buff *buf);
|
|
||||||
void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
|
void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
|
||||||
void tipc_link_retransmit(struct tipc_link *l_ptr,
|
void tipc_link_retransmit(struct tipc_link *l_ptr,
|
||||||
struct sk_buff *start, u32 retransmits);
|
struct sk_buff *start, u32 retransmits);
|
||||||
|
struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
|
||||||
|
const struct sk_buff *skb);
|
||||||
|
|
||||||
int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb);
|
int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb);
|
||||||
int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info);
|
int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info);
|
||||||
|
@ -259,20 +247,16 @@ static inline u32 mod(u32 x)
|
||||||
return x & 0xffffu;
|
return x & 0xffffu;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int between(u32 lower, u32 upper, u32 n)
|
|
||||||
{
|
|
||||||
if ((lower < n) && (n < upper))
|
|
||||||
return 1;
|
|
||||||
if ((upper < lower) && ((n > lower) || (n < upper)))
|
|
||||||
return 1;
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int less_eq(u32 left, u32 right)
|
static inline int less_eq(u32 left, u32 right)
|
||||||
{
|
{
|
||||||
return mod(right - left) < 32768u;
|
return mod(right - left) < 32768u;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline int more(u32 left, u32 right)
|
||||||
|
{
|
||||||
|
return !less_eq(left, right);
|
||||||
|
}
|
||||||
|
|
||||||
static inline int less(u32 left, u32 right)
|
static inline int less(u32 left, u32 right)
|
||||||
{
|
{
|
||||||
return less_eq(left, right) && (mod(right) != mod(left));
|
return less_eq(left, right) && (mod(right) != mod(left));
|
||||||
|
@ -309,7 +293,7 @@ static inline int link_reset_reset(struct tipc_link *l_ptr)
|
||||||
|
|
||||||
static inline int link_congested(struct tipc_link *l_ptr)
|
static inline int link_congested(struct tipc_link *l_ptr)
|
||||||
{
|
{
|
||||||
return l_ptr->out_queue_size >= l_ptr->queue_limit[0];
|
return skb_queue_len(&l_ptr->outqueue) >= l_ptr->queue_limit[0];
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
125
net/tipc/msg.c
125
net/tipc/msg.c
|
@ -166,11 +166,12 @@ err:
|
||||||
* @offset: Posision in iov to start copying from
|
* @offset: Posision in iov to start copying from
|
||||||
* @dsz: Total length of user data
|
* @dsz: Total length of user data
|
||||||
* @pktmax: Max packet size that can be used
|
* @pktmax: Max packet size that can be used
|
||||||
* @chain: Buffer or chain of buffers to be returned to caller
|
* @list: Buffer or chain of buffers to be returned to caller
|
||||||
|
*
|
||||||
* Returns message data size or errno: -ENOMEM, -EFAULT
|
* Returns message data size or errno: -ENOMEM, -EFAULT
|
||||||
*/
|
*/
|
||||||
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
|
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
|
||||||
int offset, int dsz, int pktmax , struct sk_buff **chain)
|
int dsz, int pktmax, struct sk_buff_head *list)
|
||||||
{
|
{
|
||||||
int mhsz = msg_hdr_sz(mhdr);
|
int mhsz = msg_hdr_sz(mhdr);
|
||||||
int msz = mhsz + dsz;
|
int msz = mhsz + dsz;
|
||||||
|
@ -179,22 +180,22 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
|
||||||
int pktrem = pktmax;
|
int pktrem = pktmax;
|
||||||
int drem = dsz;
|
int drem = dsz;
|
||||||
struct tipc_msg pkthdr;
|
struct tipc_msg pkthdr;
|
||||||
struct sk_buff *buf, *prev;
|
struct sk_buff *skb;
|
||||||
char *pktpos;
|
char *pktpos;
|
||||||
int rc;
|
int rc;
|
||||||
uint chain_sz = 0;
|
|
||||||
msg_set_size(mhdr, msz);
|
msg_set_size(mhdr, msz);
|
||||||
|
|
||||||
/* No fragmentation needed? */
|
/* No fragmentation needed? */
|
||||||
if (likely(msz <= pktmax)) {
|
if (likely(msz <= pktmax)) {
|
||||||
buf = tipc_buf_acquire(msz);
|
skb = tipc_buf_acquire(msz);
|
||||||
*chain = buf;
|
if (unlikely(!skb))
|
||||||
if (unlikely(!buf))
|
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
skb_copy_to_linear_data(buf, mhdr, mhsz);
|
__skb_queue_tail(list, skb);
|
||||||
pktpos = buf->data + mhsz;
|
skb_copy_to_linear_data(skb, mhdr, mhsz);
|
||||||
TIPC_SKB_CB(buf)->chain_sz = 1;
|
pktpos = skb->data + mhsz;
|
||||||
if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iov, offset, dsz))
|
if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iov, offset,
|
||||||
|
dsz))
|
||||||
return dsz;
|
return dsz;
|
||||||
rc = -EFAULT;
|
rc = -EFAULT;
|
||||||
goto error;
|
goto error;
|
||||||
|
@ -207,15 +208,15 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
|
||||||
msg_set_fragm_no(&pkthdr, pktno);
|
msg_set_fragm_no(&pkthdr, pktno);
|
||||||
|
|
||||||
/* Prepare first fragment */
|
/* Prepare first fragment */
|
||||||
*chain = buf = tipc_buf_acquire(pktmax);
|
skb = tipc_buf_acquire(pktmax);
|
||||||
if (!buf)
|
if (!skb)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
chain_sz = 1;
|
__skb_queue_tail(list, skb);
|
||||||
pktpos = buf->data;
|
pktpos = skb->data;
|
||||||
skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
|
skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
|
||||||
pktpos += INT_H_SIZE;
|
pktpos += INT_H_SIZE;
|
||||||
pktrem -= INT_H_SIZE;
|
pktrem -= INT_H_SIZE;
|
||||||
skb_copy_to_linear_data_offset(buf, INT_H_SIZE, mhdr, mhsz);
|
skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
|
||||||
pktpos += mhsz;
|
pktpos += mhsz;
|
||||||
pktrem -= mhsz;
|
pktrem -= mhsz;
|
||||||
|
|
||||||
|
@ -238,43 +239,41 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
|
||||||
pktsz = drem + INT_H_SIZE;
|
pktsz = drem + INT_H_SIZE;
|
||||||
else
|
else
|
||||||
pktsz = pktmax;
|
pktsz = pktmax;
|
||||||
prev = buf;
|
skb = tipc_buf_acquire(pktsz);
|
||||||
buf = tipc_buf_acquire(pktsz);
|
if (!skb) {
|
||||||
if (!buf) {
|
|
||||||
rc = -ENOMEM;
|
rc = -ENOMEM;
|
||||||
goto error;
|
goto error;
|
||||||
}
|
}
|
||||||
chain_sz++;
|
__skb_queue_tail(list, skb);
|
||||||
prev->next = buf;
|
|
||||||
msg_set_type(&pkthdr, FRAGMENT);
|
msg_set_type(&pkthdr, FRAGMENT);
|
||||||
msg_set_size(&pkthdr, pktsz);
|
msg_set_size(&pkthdr, pktsz);
|
||||||
msg_set_fragm_no(&pkthdr, ++pktno);
|
msg_set_fragm_no(&pkthdr, ++pktno);
|
||||||
skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
|
skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
|
||||||
pktpos = buf->data + INT_H_SIZE;
|
pktpos = skb->data + INT_H_SIZE;
|
||||||
pktrem = pktsz - INT_H_SIZE;
|
pktrem = pktsz - INT_H_SIZE;
|
||||||
|
|
||||||
} while (1);
|
} while (1);
|
||||||
TIPC_SKB_CB(*chain)->chain_sz = chain_sz;
|
msg_set_type(buf_msg(skb), LAST_FRAGMENT);
|
||||||
msg_set_type(buf_msg(buf), LAST_FRAGMENT);
|
|
||||||
return dsz;
|
return dsz;
|
||||||
error:
|
error:
|
||||||
kfree_skb_list(*chain);
|
__skb_queue_purge(list);
|
||||||
*chain = NULL;
|
__skb_queue_head_init(list);
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
|
* tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
|
||||||
* @bbuf: the existing buffer ("bundle")
|
* @list: the buffer chain of the existing buffer ("bundle")
|
||||||
* @buf: buffer to be appended
|
* @skb: buffer to be appended
|
||||||
* @mtu: max allowable size for the bundle buffer
|
* @mtu: max allowable size for the bundle buffer
|
||||||
* Consumes buffer if successful
|
* Consumes buffer if successful
|
||||||
* Returns true if bundling could be performed, otherwise false
|
* Returns true if bundling could be performed, otherwise false
|
||||||
*/
|
*/
|
||||||
bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu)
|
bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
|
||||||
{
|
{
|
||||||
struct tipc_msg *bmsg = buf_msg(bbuf);
|
struct sk_buff *bskb = skb_peek_tail(list);
|
||||||
struct tipc_msg *msg = buf_msg(buf);
|
struct tipc_msg *bmsg = buf_msg(bskb);
|
||||||
|
struct tipc_msg *msg = buf_msg(skb);
|
||||||
unsigned int bsz = msg_size(bmsg);
|
unsigned int bsz = msg_size(bmsg);
|
||||||
unsigned int msz = msg_size(msg);
|
unsigned int msz = msg_size(msg);
|
||||||
u32 start = align(bsz);
|
u32 start = align(bsz);
|
||||||
|
@ -289,35 +288,36 @@ bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu)
|
||||||
return false;
|
return false;
|
||||||
if (likely(msg_user(bmsg) != MSG_BUNDLER))
|
if (likely(msg_user(bmsg) != MSG_BUNDLER))
|
||||||
return false;
|
return false;
|
||||||
if (likely(msg_type(bmsg) != BUNDLE_OPEN))
|
if (likely(!TIPC_SKB_CB(bskb)->bundling))
|
||||||
return false;
|
return false;
|
||||||
if (unlikely(skb_tailroom(bbuf) < (pad + msz)))
|
if (unlikely(skb_tailroom(bskb) < (pad + msz)))
|
||||||
return false;
|
return false;
|
||||||
if (unlikely(max < (start + msz)))
|
if (unlikely(max < (start + msz)))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
skb_put(bbuf, pad + msz);
|
skb_put(bskb, pad + msz);
|
||||||
skb_copy_to_linear_data_offset(bbuf, start, buf->data, msz);
|
skb_copy_to_linear_data_offset(bskb, start, skb->data, msz);
|
||||||
msg_set_size(bmsg, start + msz);
|
msg_set_size(bmsg, start + msz);
|
||||||
msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
|
msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
|
||||||
bbuf->next = buf->next;
|
kfree_skb(skb);
|
||||||
kfree_skb(buf);
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* tipc_msg_make_bundle(): Create bundle buf and append message to its tail
|
* tipc_msg_make_bundle(): Create bundle buf and append message to its tail
|
||||||
* @buf: buffer to be appended and replaced
|
* @list: the buffer chain
|
||||||
* @mtu: max allowable size for the bundle buffer, inclusive header
|
* @skb: buffer to be appended and replaced
|
||||||
|
* @mtu: max allowable size for the bundle buffer, inclusive header
|
||||||
* @dnode: destination node for message. (Not always present in header)
|
* @dnode: destination node for message. (Not always present in header)
|
||||||
* Replaces buffer if successful
|
* Replaces buffer if successful
|
||||||
* Returns true if success, otherwise false
|
* Returns true if success, otherwise false
|
||||||
*/
|
*/
|
||||||
bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode)
|
bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
|
||||||
|
u32 mtu, u32 dnode)
|
||||||
{
|
{
|
||||||
struct sk_buff *bbuf;
|
struct sk_buff *bskb;
|
||||||
struct tipc_msg *bmsg;
|
struct tipc_msg *bmsg;
|
||||||
struct tipc_msg *msg = buf_msg(*buf);
|
struct tipc_msg *msg = buf_msg(skb);
|
||||||
u32 msz = msg_size(msg);
|
u32 msz = msg_size(msg);
|
||||||
u32 max = mtu - INT_H_SIZE;
|
u32 max = mtu - INT_H_SIZE;
|
||||||
|
|
||||||
|
@ -330,20 +330,19 @@ bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode)
|
||||||
if (msz > (max / 2))
|
if (msz > (max / 2))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
bbuf = tipc_buf_acquire(max);
|
bskb = tipc_buf_acquire(max);
|
||||||
if (!bbuf)
|
if (!bskb)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
skb_trim(bbuf, INT_H_SIZE);
|
skb_trim(bskb, INT_H_SIZE);
|
||||||
bmsg = buf_msg(bbuf);
|
bmsg = buf_msg(bskb);
|
||||||
tipc_msg_init(bmsg, MSG_BUNDLER, BUNDLE_OPEN, INT_H_SIZE, dnode);
|
tipc_msg_init(bmsg, MSG_BUNDLER, 0, INT_H_SIZE, dnode);
|
||||||
msg_set_seqno(bmsg, msg_seqno(msg));
|
msg_set_seqno(bmsg, msg_seqno(msg));
|
||||||
msg_set_ack(bmsg, msg_ack(msg));
|
msg_set_ack(bmsg, msg_ack(msg));
|
||||||
msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
|
msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
|
||||||
bbuf->next = (*buf)->next;
|
TIPC_SKB_CB(bskb)->bundling = true;
|
||||||
tipc_msg_bundle(bbuf, *buf, mtu);
|
__skb_queue_tail(list, bskb);
|
||||||
*buf = bbuf;
|
return tipc_msg_bundle(list, skb, mtu);
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -429,22 +428,23 @@ int tipc_msg_eval(struct sk_buff *buf, u32 *dnode)
|
||||||
/* tipc_msg_reassemble() - clone a buffer chain of fragments and
|
/* tipc_msg_reassemble() - clone a buffer chain of fragments and
|
||||||
* reassemble the clones into one message
|
* reassemble the clones into one message
|
||||||
*/
|
*/
|
||||||
struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain)
|
struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list)
|
||||||
{
|
{
|
||||||
struct sk_buff *buf = chain;
|
struct sk_buff *skb;
|
||||||
struct sk_buff *frag = buf;
|
struct sk_buff *frag = NULL;
|
||||||
struct sk_buff *head = NULL;
|
struct sk_buff *head = NULL;
|
||||||
int hdr_sz;
|
int hdr_sz;
|
||||||
|
|
||||||
/* Copy header if single buffer */
|
/* Copy header if single buffer */
|
||||||
if (!buf->next) {
|
if (skb_queue_len(list) == 1) {
|
||||||
hdr_sz = skb_headroom(buf) + msg_hdr_sz(buf_msg(buf));
|
skb = skb_peek(list);
|
||||||
return __pskb_copy(buf, hdr_sz, GFP_ATOMIC);
|
hdr_sz = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
|
||||||
|
return __pskb_copy(skb, hdr_sz, GFP_ATOMIC);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Clone all fragments and reassemble */
|
/* Clone all fragments and reassemble */
|
||||||
while (buf) {
|
skb_queue_walk(list, skb) {
|
||||||
frag = skb_clone(buf, GFP_ATOMIC);
|
frag = skb_clone(skb, GFP_ATOMIC);
|
||||||
if (!frag)
|
if (!frag)
|
||||||
goto error;
|
goto error;
|
||||||
frag->next = NULL;
|
frag->next = NULL;
|
||||||
|
@ -452,7 +452,6 @@ struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain)
|
||||||
break;
|
break;
|
||||||
if (!head)
|
if (!head)
|
||||||
goto error;
|
goto error;
|
||||||
buf = buf->next;
|
|
||||||
}
|
}
|
||||||
return frag;
|
return frag;
|
||||||
error:
|
error:
|
||||||
|
|
|
@ -464,11 +464,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
|
||||||
#define FRAGMENT 1
|
#define FRAGMENT 1
|
||||||
#define LAST_FRAGMENT 2
|
#define LAST_FRAGMENT 2
|
||||||
|
|
||||||
/* Bundling protocol message types
|
|
||||||
*/
|
|
||||||
#define BUNDLE_OPEN 0
|
|
||||||
#define BUNDLE_CLOSED 1
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Link management protocol message types
|
* Link management protocol message types
|
||||||
*/
|
*/
|
||||||
|
@ -739,13 +734,14 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
|
||||||
|
|
||||||
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
|
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
|
||||||
|
|
||||||
bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu);
|
bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu);
|
||||||
|
|
||||||
bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode);
|
bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
|
||||||
|
u32 mtu, u32 dnode);
|
||||||
|
|
||||||
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
|
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
|
||||||
int offset, int dsz, int mtu , struct sk_buff **chain);
|
int dsz, int mtu, struct sk_buff_head *list);
|
||||||
|
|
||||||
struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain);
|
struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -114,9 +114,9 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
|
||||||
return buf;
|
return buf;
|
||||||
}
|
}
|
||||||
|
|
||||||
void named_cluster_distribute(struct sk_buff *buf)
|
void named_cluster_distribute(struct sk_buff *skb)
|
||||||
{
|
{
|
||||||
struct sk_buff *obuf;
|
struct sk_buff *oskb;
|
||||||
struct tipc_node *node;
|
struct tipc_node *node;
|
||||||
u32 dnode;
|
u32 dnode;
|
||||||
|
|
||||||
|
@ -127,15 +127,15 @@ void named_cluster_distribute(struct sk_buff *buf)
|
||||||
continue;
|
continue;
|
||||||
if (!tipc_node_active_links(node))
|
if (!tipc_node_active_links(node))
|
||||||
continue;
|
continue;
|
||||||
obuf = skb_copy(buf, GFP_ATOMIC);
|
oskb = skb_copy(skb, GFP_ATOMIC);
|
||||||
if (!obuf)
|
if (!oskb)
|
||||||
break;
|
break;
|
||||||
msg_set_destnode(buf_msg(obuf), dnode);
|
msg_set_destnode(buf_msg(oskb), dnode);
|
||||||
tipc_link_xmit(obuf, dnode, dnode);
|
tipc_link_xmit_skb(oskb, dnode, dnode);
|
||||||
}
|
}
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
|
|
||||||
kfree_skb(buf);
|
kfree_skb(skb);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -190,15 +190,15 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* named_distribute - prepare name info for bulk distribution to another node
|
* named_distribute - prepare name info for bulk distribution to another node
|
||||||
* @msg_list: list of messages (buffers) to be returned from this function
|
* @list: list of messages (buffers) to be returned from this function
|
||||||
* @dnode: node to be updated
|
* @dnode: node to be updated
|
||||||
* @pls: linked list of publication items to be packed into buffer chain
|
* @pls: linked list of publication items to be packed into buffer chain
|
||||||
*/
|
*/
|
||||||
static void named_distribute(struct list_head *msg_list, u32 dnode,
|
static void named_distribute(struct sk_buff_head *list, u32 dnode,
|
||||||
struct publ_list *pls)
|
struct publ_list *pls)
|
||||||
{
|
{
|
||||||
struct publication *publ;
|
struct publication *publ;
|
||||||
struct sk_buff *buf = NULL;
|
struct sk_buff *skb = NULL;
|
||||||
struct distr_item *item = NULL;
|
struct distr_item *item = NULL;
|
||||||
uint dsz = pls->size * ITEM_SIZE;
|
uint dsz = pls->size * ITEM_SIZE;
|
||||||
uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE;
|
uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE;
|
||||||
|
@ -207,15 +207,15 @@ static void named_distribute(struct list_head *msg_list, u32 dnode,
|
||||||
|
|
||||||
list_for_each_entry(publ, &pls->list, local_list) {
|
list_for_each_entry(publ, &pls->list, local_list) {
|
||||||
/* Prepare next buffer: */
|
/* Prepare next buffer: */
|
||||||
if (!buf) {
|
if (!skb) {
|
||||||
msg_rem = min_t(uint, rem, msg_dsz);
|
msg_rem = min_t(uint, rem, msg_dsz);
|
||||||
rem -= msg_rem;
|
rem -= msg_rem;
|
||||||
buf = named_prepare_buf(PUBLICATION, msg_rem, dnode);
|
skb = named_prepare_buf(PUBLICATION, msg_rem, dnode);
|
||||||
if (!buf) {
|
if (!skb) {
|
||||||
pr_warn("Bulk publication failure\n");
|
pr_warn("Bulk publication failure\n");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
item = (struct distr_item *)msg_data(buf_msg(buf));
|
item = (struct distr_item *)msg_data(buf_msg(skb));
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Pack publication into message: */
|
/* Pack publication into message: */
|
||||||
|
@ -225,8 +225,8 @@ static void named_distribute(struct list_head *msg_list, u32 dnode,
|
||||||
|
|
||||||
/* Append full buffer to list: */
|
/* Append full buffer to list: */
|
||||||
if (!msg_rem) {
|
if (!msg_rem) {
|
||||||
list_add_tail((struct list_head *)buf, msg_list);
|
__skb_queue_tail(list, skb);
|
||||||
buf = NULL;
|
skb = NULL;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -236,27 +236,57 @@ static void named_distribute(struct list_head *msg_list, u32 dnode,
|
||||||
*/
|
*/
|
||||||
void tipc_named_node_up(u32 dnode)
|
void tipc_named_node_up(u32 dnode)
|
||||||
{
|
{
|
||||||
LIST_HEAD(msg_list);
|
struct sk_buff_head head;
|
||||||
struct sk_buff *buf_chain;
|
|
||||||
|
__skb_queue_head_init(&head);
|
||||||
|
|
||||||
read_lock_bh(&tipc_nametbl_lock);
|
read_lock_bh(&tipc_nametbl_lock);
|
||||||
named_distribute(&msg_list, dnode, &publ_cluster);
|
named_distribute(&head, dnode, &publ_cluster);
|
||||||
named_distribute(&msg_list, dnode, &publ_zone);
|
named_distribute(&head, dnode, &publ_zone);
|
||||||
read_unlock_bh(&tipc_nametbl_lock);
|
read_unlock_bh(&tipc_nametbl_lock);
|
||||||
|
|
||||||
/* Convert circular list to linear list and send: */
|
tipc_link_xmit(&head, dnode, dnode);
|
||||||
buf_chain = (struct sk_buff *)msg_list.next;
|
}
|
||||||
((struct sk_buff *)msg_list.prev)->next = NULL;
|
|
||||||
tipc_link_xmit(buf_chain, dnode, dnode);
|
static void tipc_publ_subscribe(struct publication *publ, u32 addr)
|
||||||
|
{
|
||||||
|
struct tipc_node *node;
|
||||||
|
|
||||||
|
if (in_own_node(addr))
|
||||||
|
return;
|
||||||
|
|
||||||
|
node = tipc_node_find(addr);
|
||||||
|
if (!node) {
|
||||||
|
pr_warn("Node subscription rejected, unknown node 0x%x\n",
|
||||||
|
addr);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
tipc_node_lock(node);
|
||||||
|
list_add_tail(&publ->nodesub_list, &node->publ_list);
|
||||||
|
tipc_node_unlock(node);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void tipc_publ_unsubscribe(struct publication *publ, u32 addr)
|
||||||
|
{
|
||||||
|
struct tipc_node *node;
|
||||||
|
|
||||||
|
node = tipc_node_find(addr);
|
||||||
|
if (!node)
|
||||||
|
return;
|
||||||
|
|
||||||
|
tipc_node_lock(node);
|
||||||
|
list_del_init(&publ->nodesub_list);
|
||||||
|
tipc_node_unlock(node);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* named_purge_publ - remove publication associated with a failed node
|
* tipc_publ_purge - remove publication associated with a failed node
|
||||||
*
|
*
|
||||||
* Invoked for each publication issued by a newly failed node.
|
* Invoked for each publication issued by a newly failed node.
|
||||||
* Removes publication structure from name table & deletes it.
|
* Removes publication structure from name table & deletes it.
|
||||||
*/
|
*/
|
||||||
static void named_purge_publ(struct publication *publ)
|
static void tipc_publ_purge(struct publication *publ, u32 addr)
|
||||||
{
|
{
|
||||||
struct publication *p;
|
struct publication *p;
|
||||||
|
|
||||||
|
@ -264,7 +294,7 @@ static void named_purge_publ(struct publication *publ)
|
||||||
p = tipc_nametbl_remove_publ(publ->type, publ->lower,
|
p = tipc_nametbl_remove_publ(publ->type, publ->lower,
|
||||||
publ->node, publ->ref, publ->key);
|
publ->node, publ->ref, publ->key);
|
||||||
if (p)
|
if (p)
|
||||||
tipc_nodesub_unsubscribe(&p->subscr);
|
tipc_publ_unsubscribe(p, addr);
|
||||||
write_unlock_bh(&tipc_nametbl_lock);
|
write_unlock_bh(&tipc_nametbl_lock);
|
||||||
|
|
||||||
if (p != publ) {
|
if (p != publ) {
|
||||||
|
@ -277,6 +307,14 @@ static void named_purge_publ(struct publication *publ)
|
||||||
kfree(p);
|
kfree(p);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void tipc_publ_notify(struct list_head *nsub_list, u32 addr)
|
||||||
|
{
|
||||||
|
struct publication *publ, *tmp;
|
||||||
|
|
||||||
|
list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
|
||||||
|
tipc_publ_purge(publ, addr);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* tipc_update_nametbl - try to process a nametable update and notify
|
* tipc_update_nametbl - try to process a nametable update and notify
|
||||||
* subscribers
|
* subscribers
|
||||||
|
@ -294,9 +332,7 @@ static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype)
|
||||||
TIPC_CLUSTER_SCOPE, node,
|
TIPC_CLUSTER_SCOPE, node,
|
||||||
ntohl(i->ref), ntohl(i->key));
|
ntohl(i->ref), ntohl(i->key));
|
||||||
if (publ) {
|
if (publ) {
|
||||||
tipc_nodesub_subscribe(&publ->subscr, node, publ,
|
tipc_publ_subscribe(publ, node);
|
||||||
(net_ev_handler)
|
|
||||||
named_purge_publ);
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
} else if (dtype == WITHDRAWAL) {
|
} else if (dtype == WITHDRAWAL) {
|
||||||
|
@ -304,7 +340,7 @@ static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype)
|
||||||
node, ntohl(i->ref),
|
node, ntohl(i->ref),
|
||||||
ntohl(i->key));
|
ntohl(i->key));
|
||||||
if (publ) {
|
if (publ) {
|
||||||
tipc_nodesub_unsubscribe(&publ->subscr);
|
tipc_publ_unsubscribe(publ, node);
|
||||||
kfree(publ);
|
kfree(publ);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
|
@ -74,5 +74,6 @@ void tipc_named_node_up(u32 dnode);
|
||||||
void tipc_named_rcv(struct sk_buff *buf);
|
void tipc_named_rcv(struct sk_buff *buf);
|
||||||
void tipc_named_reinit(void);
|
void tipc_named_reinit(void);
|
||||||
void tipc_named_process_backlog(void);
|
void tipc_named_process_backlog(void);
|
||||||
|
void tipc_publ_notify(struct list_head *nsub_list, u32 addr);
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -144,7 +144,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
|
||||||
publ->key = key;
|
publ->key = key;
|
||||||
INIT_LIST_HEAD(&publ->local_list);
|
INIT_LIST_HEAD(&publ->local_list);
|
||||||
INIT_LIST_HEAD(&publ->pport_list);
|
INIT_LIST_HEAD(&publ->pport_list);
|
||||||
INIT_LIST_HEAD(&publ->subscr.nodesub_list);
|
INIT_LIST_HEAD(&publ->nodesub_list);
|
||||||
return publ;
|
return publ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -37,8 +37,6 @@
|
||||||
#ifndef _TIPC_NAME_TABLE_H
|
#ifndef _TIPC_NAME_TABLE_H
|
||||||
#define _TIPC_NAME_TABLE_H
|
#define _TIPC_NAME_TABLE_H
|
||||||
|
|
||||||
#include "node_subscr.h"
|
|
||||||
|
|
||||||
struct tipc_subscription;
|
struct tipc_subscription;
|
||||||
struct tipc_port_list;
|
struct tipc_port_list;
|
||||||
|
|
||||||
|
@ -56,7 +54,7 @@ struct tipc_port_list;
|
||||||
* @node: network address of publishing port's node
|
* @node: network address of publishing port's node
|
||||||
* @ref: publishing port
|
* @ref: publishing port
|
||||||
* @key: publication key
|
* @key: publication key
|
||||||
* @subscr: subscription to "node down" event (for off-node publications only)
|
* @nodesub_list: subscription to "node down" event (off-node publication only)
|
||||||
* @local_list: adjacent entries in list of publications made by this node
|
* @local_list: adjacent entries in list of publications made by this node
|
||||||
* @pport_list: adjacent entries in list of publications made by this port
|
* @pport_list: adjacent entries in list of publications made by this port
|
||||||
* @node_list: adjacent matching name seq publications with >= node scope
|
* @node_list: adjacent matching name seq publications with >= node scope
|
||||||
|
@ -73,7 +71,7 @@ struct publication {
|
||||||
u32 node;
|
u32 node;
|
||||||
u32 ref;
|
u32 ref;
|
||||||
u32 key;
|
u32 key;
|
||||||
struct tipc_node_subscr subscr;
|
struct list_head nodesub_list;
|
||||||
struct list_head local_list;
|
struct list_head local_list;
|
||||||
struct list_head pport_list;
|
struct list_head pport_list;
|
||||||
struct list_head node_list;
|
struct list_head node_list;
|
||||||
|
|
|
@ -113,9 +113,10 @@ struct tipc_node *tipc_node_create(u32 addr)
|
||||||
spin_lock_init(&n_ptr->lock);
|
spin_lock_init(&n_ptr->lock);
|
||||||
INIT_HLIST_NODE(&n_ptr->hash);
|
INIT_HLIST_NODE(&n_ptr->hash);
|
||||||
INIT_LIST_HEAD(&n_ptr->list);
|
INIT_LIST_HEAD(&n_ptr->list);
|
||||||
INIT_LIST_HEAD(&n_ptr->nsub);
|
INIT_LIST_HEAD(&n_ptr->publ_list);
|
||||||
INIT_LIST_HEAD(&n_ptr->conn_sks);
|
INIT_LIST_HEAD(&n_ptr->conn_sks);
|
||||||
__skb_queue_head_init(&n_ptr->waiting_sks);
|
__skb_queue_head_init(&n_ptr->waiting_sks);
|
||||||
|
__skb_queue_head_init(&n_ptr->bclink.deferred_queue);
|
||||||
|
|
||||||
hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
|
hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
|
||||||
|
|
||||||
|
@ -381,8 +382,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
|
||||||
|
|
||||||
/* Flush broadcast link info associated with lost node */
|
/* Flush broadcast link info associated with lost node */
|
||||||
if (n_ptr->bclink.recv_permitted) {
|
if (n_ptr->bclink.recv_permitted) {
|
||||||
kfree_skb_list(n_ptr->bclink.deferred_head);
|
__skb_queue_purge(&n_ptr->bclink.deferred_queue);
|
||||||
n_ptr->bclink.deferred_size = 0;
|
|
||||||
|
|
||||||
if (n_ptr->bclink.reasm_buf) {
|
if (n_ptr->bclink.reasm_buf) {
|
||||||
kfree_skb(n_ptr->bclink.reasm_buf);
|
kfree_skb(n_ptr->bclink.reasm_buf);
|
||||||
|
@ -574,7 +574,7 @@ void tipc_node_unlock(struct tipc_node *node)
|
||||||
skb_queue_splice_init(&node->waiting_sks, &waiting_sks);
|
skb_queue_splice_init(&node->waiting_sks, &waiting_sks);
|
||||||
|
|
||||||
if (flags & TIPC_NOTIFY_NODE_DOWN) {
|
if (flags & TIPC_NOTIFY_NODE_DOWN) {
|
||||||
list_replace_init(&node->nsub, &nsub_list);
|
list_replace_init(&node->publ_list, &nsub_list);
|
||||||
list_replace_init(&node->conn_sks, &conn_sks);
|
list_replace_init(&node->conn_sks, &conn_sks);
|
||||||
}
|
}
|
||||||
node->action_flags &= ~(TIPC_WAKEUP_USERS | TIPC_NOTIFY_NODE_DOWN |
|
node->action_flags &= ~(TIPC_WAKEUP_USERS | TIPC_NOTIFY_NODE_DOWN |
|
||||||
|
@ -591,7 +591,7 @@ void tipc_node_unlock(struct tipc_node *node)
|
||||||
tipc_node_abort_sock_conns(&conn_sks);
|
tipc_node_abort_sock_conns(&conn_sks);
|
||||||
|
|
||||||
if (!list_empty(&nsub_list))
|
if (!list_empty(&nsub_list))
|
||||||
tipc_nodesub_notify(&nsub_list);
|
tipc_publ_notify(&nsub_list, addr);
|
||||||
|
|
||||||
if (flags & TIPC_WAKEUP_BCAST_USERS)
|
if (flags & TIPC_WAKEUP_BCAST_USERS)
|
||||||
tipc_bclink_wakeup_users();
|
tipc_bclink_wakeup_users();
|
||||||
|
|
|
@ -37,7 +37,6 @@
|
||||||
#ifndef _TIPC_NODE_H
|
#ifndef _TIPC_NODE_H
|
||||||
#define _TIPC_NODE_H
|
#define _TIPC_NODE_H
|
||||||
|
|
||||||
#include "node_subscr.h"
|
|
||||||
#include "addr.h"
|
#include "addr.h"
|
||||||
#include "net.h"
|
#include "net.h"
|
||||||
#include "bearer.h"
|
#include "bearer.h"
|
||||||
|
@ -72,9 +71,7 @@ enum {
|
||||||
* @last_in: sequence # of last in-sequence b'cast message received from node
|
* @last_in: sequence # of last in-sequence b'cast message received from node
|
||||||
* @last_sent: sequence # of last b'cast message sent by node
|
* @last_sent: sequence # of last b'cast message sent by node
|
||||||
* @oos_state: state tracker for handling OOS b'cast messages
|
* @oos_state: state tracker for handling OOS b'cast messages
|
||||||
* @deferred_size: number of OOS b'cast messages in deferred queue
|
* @deferred_queue: deferred queue saved OOS b'cast message received from node
|
||||||
* @deferred_head: oldest OOS b'cast message received from node
|
|
||||||
* @deferred_tail: newest OOS b'cast message received from node
|
|
||||||
* @reasm_buf: broadcast reassembly queue head from node
|
* @reasm_buf: broadcast reassembly queue head from node
|
||||||
* @recv_permitted: true if node is allowed to receive b'cast messages
|
* @recv_permitted: true if node is allowed to receive b'cast messages
|
||||||
*/
|
*/
|
||||||
|
@ -84,8 +81,7 @@ struct tipc_node_bclink {
|
||||||
u32 last_sent;
|
u32 last_sent;
|
||||||
u32 oos_state;
|
u32 oos_state;
|
||||||
u32 deferred_size;
|
u32 deferred_size;
|
||||||
struct sk_buff *deferred_head;
|
struct sk_buff_head deferred_queue;
|
||||||
struct sk_buff *deferred_tail;
|
|
||||||
struct sk_buff *reasm_buf;
|
struct sk_buff *reasm_buf;
|
||||||
bool recv_permitted;
|
bool recv_permitted;
|
||||||
};
|
};
|
||||||
|
@ -104,7 +100,7 @@ struct tipc_node_bclink {
|
||||||
* @link_cnt: number of links to node
|
* @link_cnt: number of links to node
|
||||||
* @signature: node instance identifier
|
* @signature: node instance identifier
|
||||||
* @link_id: local and remote bearer ids of changing link, if any
|
* @link_id: local and remote bearer ids of changing link, if any
|
||||||
* @nsub: list of "node down" subscriptions monitoring node
|
* @publ_list: list of publications
|
||||||
* @rcu: rcu struct for tipc_node
|
* @rcu: rcu struct for tipc_node
|
||||||
*/
|
*/
|
||||||
struct tipc_node {
|
struct tipc_node {
|
||||||
|
@ -121,7 +117,7 @@ struct tipc_node {
|
||||||
int working_links;
|
int working_links;
|
||||||
u32 signature;
|
u32 signature;
|
||||||
u32 link_id;
|
u32 link_id;
|
||||||
struct list_head nsub;
|
struct list_head publ_list;
|
||||||
struct sk_buff_head waiting_sks;
|
struct sk_buff_head waiting_sks;
|
||||||
struct list_head conn_sks;
|
struct list_head conn_sks;
|
||||||
struct rcu_head rcu;
|
struct rcu_head rcu;
|
||||||
|
|
|
@ -1,96 +0,0 @@
|
||||||
/*
|
|
||||||
* net/tipc/node_subscr.c: TIPC "node down" subscription handling
|
|
||||||
*
|
|
||||||
* Copyright (c) 1995-2006, Ericsson AB
|
|
||||||
* Copyright (c) 2005, 2010-2011, Wind River Systems
|
|
||||||
* All rights reserved.
|
|
||||||
*
|
|
||||||
* Redistribution and use in source and binary forms, with or without
|
|
||||||
* modification, are permitted provided that the following conditions are met:
|
|
||||||
*
|
|
||||||
* 1. Redistributions of source code must retain the above copyright
|
|
||||||
* notice, this list of conditions and the following disclaimer.
|
|
||||||
* 2. Redistributions in binary form must reproduce the above copyright
|
|
||||||
* notice, this list of conditions and the following disclaimer in the
|
|
||||||
* documentation and/or other materials provided with the distribution.
|
|
||||||
* 3. Neither the names of the copyright holders nor the names of its
|
|
||||||
* contributors may be used to endorse or promote products derived from
|
|
||||||
* this software without specific prior written permission.
|
|
||||||
*
|
|
||||||
* Alternatively, this software may be distributed under the terms of the
|
|
||||||
* GNU General Public License ("GPL") version 2 as published by the Free
|
|
||||||
* Software Foundation.
|
|
||||||
*
|
|
||||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
||||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
||||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
||||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
|
||||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
||||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
||||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
||||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
||||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
||||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
||||||
* POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include "core.h"
|
|
||||||
#include "node_subscr.h"
|
|
||||||
#include "node.h"
|
|
||||||
|
|
||||||
/**
|
|
||||||
* tipc_nodesub_subscribe - create "node down" subscription for specified node
|
|
||||||
*/
|
|
||||||
void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
|
|
||||||
void *usr_handle, net_ev_handler handle_down)
|
|
||||||
{
|
|
||||||
if (in_own_node(addr)) {
|
|
||||||
node_sub->node = NULL;
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
node_sub->node = tipc_node_find(addr);
|
|
||||||
if (!node_sub->node) {
|
|
||||||
pr_warn("Node subscription rejected, unknown node 0x%x\n",
|
|
||||||
addr);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
node_sub->handle_node_down = handle_down;
|
|
||||||
node_sub->usr_handle = usr_handle;
|
|
||||||
|
|
||||||
tipc_node_lock(node_sub->node);
|
|
||||||
list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub);
|
|
||||||
tipc_node_unlock(node_sub->node);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* tipc_nodesub_unsubscribe - cancel "node down" subscription (if any)
|
|
||||||
*/
|
|
||||||
void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
|
|
||||||
{
|
|
||||||
if (!node_sub->node)
|
|
||||||
return;
|
|
||||||
|
|
||||||
tipc_node_lock(node_sub->node);
|
|
||||||
list_del_init(&node_sub->nodesub_list);
|
|
||||||
tipc_node_unlock(node_sub->node);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* tipc_nodesub_notify - notify subscribers that a node is unreachable
|
|
||||||
*
|
|
||||||
* Note: node is locked by caller
|
|
||||||
*/
|
|
||||||
void tipc_nodesub_notify(struct list_head *nsub_list)
|
|
||||||
{
|
|
||||||
struct tipc_node_subscr *ns, *safe;
|
|
||||||
net_ev_handler handle_node_down;
|
|
||||||
|
|
||||||
list_for_each_entry_safe(ns, safe, nsub_list, nodesub_list) {
|
|
||||||
handle_node_down = ns->handle_node_down;
|
|
||||||
if (handle_node_down) {
|
|
||||||
ns->handle_node_down = NULL;
|
|
||||||
handle_node_down(ns->usr_handle);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,63 +0,0 @@
|
||||||
/*
|
|
||||||
* net/tipc/node_subscr.h: Include file for TIPC "node down" subscription handling
|
|
||||||
*
|
|
||||||
* Copyright (c) 1995-2006, Ericsson AB
|
|
||||||
* Copyright (c) 2005, 2010-2011, Wind River Systems
|
|
||||||
* All rights reserved.
|
|
||||||
*
|
|
||||||
* Redistribution and use in source and binary forms, with or without
|
|
||||||
* modification, are permitted provided that the following conditions are met:
|
|
||||||
*
|
|
||||||
* 1. Redistributions of source code must retain the above copyright
|
|
||||||
* notice, this list of conditions and the following disclaimer.
|
|
||||||
* 2. Redistributions in binary form must reproduce the above copyright
|
|
||||||
* notice, this list of conditions and the following disclaimer in the
|
|
||||||
* documentation and/or other materials provided with the distribution.
|
|
||||||
* 3. Neither the names of the copyright holders nor the names of its
|
|
||||||
* contributors may be used to endorse or promote products derived from
|
|
||||||
* this software without specific prior written permission.
|
|
||||||
*
|
|
||||||
* Alternatively, this software may be distributed under the terms of the
|
|
||||||
* GNU General Public License ("GPL") version 2 as published by the Free
|
|
||||||
* Software Foundation.
|
|
||||||
*
|
|
||||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
||||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
||||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
||||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
|
||||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
||||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
||||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
||||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
||||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
||||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
||||||
* POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _TIPC_NODE_SUBSCR_H
|
|
||||||
#define _TIPC_NODE_SUBSCR_H
|
|
||||||
|
|
||||||
#include "addr.h"
|
|
||||||
|
|
||||||
typedef void (*net_ev_handler) (void *usr_handle);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* struct tipc_node_subscr - "node down" subscription entry
|
|
||||||
* @node: ptr to node structure of interest (or NULL, if none)
|
|
||||||
* @handle_node_down: routine to invoke when node fails
|
|
||||||
* @usr_handle: argument to pass to routine when node fails
|
|
||||||
* @nodesub_list: adjacent entries in list of subscriptions for the node
|
|
||||||
*/
|
|
||||||
struct tipc_node_subscr {
|
|
||||||
struct tipc_node *node;
|
|
||||||
net_ev_handler handle_node_down;
|
|
||||||
void *usr_handle;
|
|
||||||
struct list_head nodesub_list;
|
|
||||||
};
|
|
||||||
|
|
||||||
void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
|
|
||||||
void *usr_handle, net_ev_handler handle_down);
|
|
||||||
void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub);
|
|
||||||
void tipc_nodesub_notify(struct list_head *nsub_list);
|
|
||||||
|
|
||||||
#endif
|
|
|
@ -244,12 +244,12 @@ static void tsk_advance_rx_queue(struct sock *sk)
|
||||||
*/
|
*/
|
||||||
static void tsk_rej_rx_queue(struct sock *sk)
|
static void tsk_rej_rx_queue(struct sock *sk)
|
||||||
{
|
{
|
||||||
struct sk_buff *buf;
|
struct sk_buff *skb;
|
||||||
u32 dnode;
|
u32 dnode;
|
||||||
|
|
||||||
while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
|
while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
|
||||||
if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT))
|
if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
|
||||||
tipc_link_xmit(buf, dnode, 0);
|
tipc_link_xmit_skb(skb, dnode, 0);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -462,7 +462,7 @@ static int tipc_release(struct socket *sock)
|
||||||
{
|
{
|
||||||
struct sock *sk = sock->sk;
|
struct sock *sk = sock->sk;
|
||||||
struct tipc_sock *tsk;
|
struct tipc_sock *tsk;
|
||||||
struct sk_buff *buf;
|
struct sk_buff *skb;
|
||||||
u32 dnode;
|
u32 dnode;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -481,11 +481,11 @@ static int tipc_release(struct socket *sock)
|
||||||
*/
|
*/
|
||||||
dnode = tsk_peer_node(tsk);
|
dnode = tsk_peer_node(tsk);
|
||||||
while (sock->state != SS_DISCONNECTING) {
|
while (sock->state != SS_DISCONNECTING) {
|
||||||
buf = __skb_dequeue(&sk->sk_receive_queue);
|
skb = __skb_dequeue(&sk->sk_receive_queue);
|
||||||
if (buf == NULL)
|
if (skb == NULL)
|
||||||
break;
|
break;
|
||||||
if (TIPC_SKB_CB(buf)->handle != NULL)
|
if (TIPC_SKB_CB(skb)->handle != NULL)
|
||||||
kfree_skb(buf);
|
kfree_skb(skb);
|
||||||
else {
|
else {
|
||||||
if ((sock->state == SS_CONNECTING) ||
|
if ((sock->state == SS_CONNECTING) ||
|
||||||
(sock->state == SS_CONNECTED)) {
|
(sock->state == SS_CONNECTED)) {
|
||||||
|
@ -493,8 +493,8 @@ static int tipc_release(struct socket *sock)
|
||||||
tsk->connected = 0;
|
tsk->connected = 0;
|
||||||
tipc_node_remove_conn(dnode, tsk->ref);
|
tipc_node_remove_conn(dnode, tsk->ref);
|
||||||
}
|
}
|
||||||
if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT))
|
if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
|
||||||
tipc_link_xmit(buf, dnode, 0);
|
tipc_link_xmit_skb(skb, dnode, 0);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -502,12 +502,12 @@ static int tipc_release(struct socket *sock)
|
||||||
tipc_sk_ref_discard(tsk->ref);
|
tipc_sk_ref_discard(tsk->ref);
|
||||||
k_cancel_timer(&tsk->timer);
|
k_cancel_timer(&tsk->timer);
|
||||||
if (tsk->connected) {
|
if (tsk->connected) {
|
||||||
buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
|
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
|
||||||
SHORT_H_SIZE, 0, dnode, tipc_own_addr,
|
SHORT_H_SIZE, 0, dnode, tipc_own_addr,
|
||||||
tsk_peer_port(tsk),
|
tsk_peer_port(tsk),
|
||||||
tsk->ref, TIPC_ERR_NO_PORT);
|
tsk->ref, TIPC_ERR_NO_PORT);
|
||||||
if (buf)
|
if (skb)
|
||||||
tipc_link_xmit(buf, dnode, tsk->ref);
|
tipc_link_xmit_skb(skb, dnode, tsk->ref);
|
||||||
tipc_node_remove_conn(dnode, tsk->ref);
|
tipc_node_remove_conn(dnode, tsk->ref);
|
||||||
}
|
}
|
||||||
k_term_timer(&tsk->timer);
|
k_term_timer(&tsk->timer);
|
||||||
|
@ -712,7 +712,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
|
||||||
{
|
{
|
||||||
struct sock *sk = sock->sk;
|
struct sock *sk = sock->sk;
|
||||||
struct tipc_msg *mhdr = &tipc_sk(sk)->phdr;
|
struct tipc_msg *mhdr = &tipc_sk(sk)->phdr;
|
||||||
struct sk_buff *buf;
|
struct sk_buff_head head;
|
||||||
uint mtu;
|
uint mtu;
|
||||||
int rc;
|
int rc;
|
||||||
|
|
||||||
|
@ -727,12 +727,13 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
|
||||||
|
|
||||||
new_mtu:
|
new_mtu:
|
||||||
mtu = tipc_bclink_get_mtu();
|
mtu = tipc_bclink_get_mtu();
|
||||||
rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &buf);
|
__skb_queue_head_init(&head);
|
||||||
|
rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head);
|
||||||
if (unlikely(rc < 0))
|
if (unlikely(rc < 0))
|
||||||
return rc;
|
return rc;
|
||||||
|
|
||||||
do {
|
do {
|
||||||
rc = tipc_bclink_xmit(buf);
|
rc = tipc_bclink_xmit(&head);
|
||||||
if (likely(rc >= 0)) {
|
if (likely(rc >= 0)) {
|
||||||
rc = dsz;
|
rc = dsz;
|
||||||
break;
|
break;
|
||||||
|
@ -744,7 +745,7 @@ new_mtu:
|
||||||
tipc_sk(sk)->link_cong = 1;
|
tipc_sk(sk)->link_cong = 1;
|
||||||
rc = tipc_wait_for_sndmsg(sock, &timeo);
|
rc = tipc_wait_for_sndmsg(sock, &timeo);
|
||||||
if (rc)
|
if (rc)
|
||||||
kfree_skb_list(buf);
|
__skb_queue_purge(&head);
|
||||||
} while (!rc);
|
} while (!rc);
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
@ -906,7 +907,8 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
|
||||||
struct tipc_sock *tsk = tipc_sk(sk);
|
struct tipc_sock *tsk = tipc_sk(sk);
|
||||||
struct tipc_msg *mhdr = &tsk->phdr;
|
struct tipc_msg *mhdr = &tsk->phdr;
|
||||||
u32 dnode, dport;
|
u32 dnode, dport;
|
||||||
struct sk_buff *buf;
|
struct sk_buff_head head;
|
||||||
|
struct sk_buff *skb;
|
||||||
struct tipc_name_seq *seq = &dest->addr.nameseq;
|
struct tipc_name_seq *seq = &dest->addr.nameseq;
|
||||||
u32 mtu;
|
u32 mtu;
|
||||||
long timeo;
|
long timeo;
|
||||||
|
@ -981,13 +983,15 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
|
||||||
|
|
||||||
new_mtu:
|
new_mtu:
|
||||||
mtu = tipc_node_get_mtu(dnode, tsk->ref);
|
mtu = tipc_node_get_mtu(dnode, tsk->ref);
|
||||||
rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &buf);
|
__skb_queue_head_init(&head);
|
||||||
|
rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head);
|
||||||
if (rc < 0)
|
if (rc < 0)
|
||||||
goto exit;
|
goto exit;
|
||||||
|
|
||||||
do {
|
do {
|
||||||
TIPC_SKB_CB(buf)->wakeup_pending = tsk->link_cong;
|
skb = skb_peek(&head);
|
||||||
rc = tipc_link_xmit(buf, dnode, tsk->ref);
|
TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
|
||||||
|
rc = tipc_link_xmit(&head, dnode, tsk->ref);
|
||||||
if (likely(rc >= 0)) {
|
if (likely(rc >= 0)) {
|
||||||
if (sock->state != SS_READY)
|
if (sock->state != SS_READY)
|
||||||
sock->state = SS_CONNECTING;
|
sock->state = SS_CONNECTING;
|
||||||
|
@ -1001,7 +1005,7 @@ new_mtu:
|
||||||
tsk->link_cong = 1;
|
tsk->link_cong = 1;
|
||||||
rc = tipc_wait_for_sndmsg(sock, &timeo);
|
rc = tipc_wait_for_sndmsg(sock, &timeo);
|
||||||
if (rc)
|
if (rc)
|
||||||
kfree_skb_list(buf);
|
__skb_queue_purge(&head);
|
||||||
} while (!rc);
|
} while (!rc);
|
||||||
exit:
|
exit:
|
||||||
if (iocb)
|
if (iocb)
|
||||||
|
@ -1058,7 +1062,7 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
|
||||||
struct sock *sk = sock->sk;
|
struct sock *sk = sock->sk;
|
||||||
struct tipc_sock *tsk = tipc_sk(sk);
|
struct tipc_sock *tsk = tipc_sk(sk);
|
||||||
struct tipc_msg *mhdr = &tsk->phdr;
|
struct tipc_msg *mhdr = &tsk->phdr;
|
||||||
struct sk_buff *buf;
|
struct sk_buff_head head;
|
||||||
DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
|
DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
|
||||||
u32 ref = tsk->ref;
|
u32 ref = tsk->ref;
|
||||||
int rc = -EINVAL;
|
int rc = -EINVAL;
|
||||||
|
@ -1093,12 +1097,13 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
|
||||||
next:
|
next:
|
||||||
mtu = tsk->max_pkt;
|
mtu = tsk->max_pkt;
|
||||||
send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
|
send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
|
||||||
rc = tipc_msg_build(mhdr, m, sent, send, mtu, &buf);
|
__skb_queue_head_init(&head);
|
||||||
|
rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head);
|
||||||
if (unlikely(rc < 0))
|
if (unlikely(rc < 0))
|
||||||
goto exit;
|
goto exit;
|
||||||
do {
|
do {
|
||||||
if (likely(!tsk_conn_cong(tsk))) {
|
if (likely(!tsk_conn_cong(tsk))) {
|
||||||
rc = tipc_link_xmit(buf, dnode, ref);
|
rc = tipc_link_xmit(&head, dnode, ref);
|
||||||
if (likely(!rc)) {
|
if (likely(!rc)) {
|
||||||
tsk->sent_unacked++;
|
tsk->sent_unacked++;
|
||||||
sent += send;
|
sent += send;
|
||||||
|
@ -1116,7 +1121,7 @@ next:
|
||||||
}
|
}
|
||||||
rc = tipc_wait_for_sndpkt(sock, &timeo);
|
rc = tipc_wait_for_sndpkt(sock, &timeo);
|
||||||
if (rc)
|
if (rc)
|
||||||
kfree_skb_list(buf);
|
__skb_queue_purge(&head);
|
||||||
} while (!rc);
|
} while (!rc);
|
||||||
exit:
|
exit:
|
||||||
if (iocb)
|
if (iocb)
|
||||||
|
@ -1261,20 +1266,20 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
|
||||||
|
|
||||||
static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
|
static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
|
||||||
{
|
{
|
||||||
struct sk_buff *buf = NULL;
|
struct sk_buff *skb = NULL;
|
||||||
struct tipc_msg *msg;
|
struct tipc_msg *msg;
|
||||||
u32 peer_port = tsk_peer_port(tsk);
|
u32 peer_port = tsk_peer_port(tsk);
|
||||||
u32 dnode = tsk_peer_node(tsk);
|
u32 dnode = tsk_peer_node(tsk);
|
||||||
|
|
||||||
if (!tsk->connected)
|
if (!tsk->connected)
|
||||||
return;
|
return;
|
||||||
buf = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode,
|
skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode,
|
||||||
tipc_own_addr, peer_port, tsk->ref, TIPC_OK);
|
tipc_own_addr, peer_port, tsk->ref, TIPC_OK);
|
||||||
if (!buf)
|
if (!skb)
|
||||||
return;
|
return;
|
||||||
msg = buf_msg(buf);
|
msg = buf_msg(skb);
|
||||||
msg_set_msgcnt(msg, ack);
|
msg_set_msgcnt(msg, ack);
|
||||||
tipc_link_xmit(buf, dnode, msg_link_selector(msg));
|
tipc_link_xmit_skb(skb, dnode, msg_link_selector(msg));
|
||||||
}
|
}
|
||||||
|
|
||||||
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
|
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
|
||||||
|
@ -1729,20 +1734,20 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf)
|
||||||
/**
|
/**
|
||||||
* tipc_backlog_rcv - handle incoming message from backlog queue
|
* tipc_backlog_rcv - handle incoming message from backlog queue
|
||||||
* @sk: socket
|
* @sk: socket
|
||||||
* @buf: message
|
* @skb: message
|
||||||
*
|
*
|
||||||
* Caller must hold socket lock, but not port lock.
|
* Caller must hold socket lock, but not port lock.
|
||||||
*
|
*
|
||||||
* Returns 0
|
* Returns 0
|
||||||
*/
|
*/
|
||||||
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
|
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
|
||||||
{
|
{
|
||||||
int rc;
|
int rc;
|
||||||
u32 onode;
|
u32 onode;
|
||||||
struct tipc_sock *tsk = tipc_sk(sk);
|
struct tipc_sock *tsk = tipc_sk(sk);
|
||||||
uint truesize = buf->truesize;
|
uint truesize = skb->truesize;
|
||||||
|
|
||||||
rc = filter_rcv(sk, buf);
|
rc = filter_rcv(sk, skb);
|
||||||
|
|
||||||
if (likely(!rc)) {
|
if (likely(!rc)) {
|
||||||
if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
|
if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
|
||||||
|
@ -1750,25 +1755,25 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((rc < 0) && !tipc_msg_reverse(buf, &onode, -rc))
|
if ((rc < 0) && !tipc_msg_reverse(skb, &onode, -rc))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
tipc_link_xmit(buf, onode, 0);
|
tipc_link_xmit_skb(skb, onode, 0);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* tipc_sk_rcv - handle incoming message
|
* tipc_sk_rcv - handle incoming message
|
||||||
* @buf: buffer containing arriving message
|
* @skb: buffer containing arriving message
|
||||||
* Consumes buffer
|
* Consumes buffer
|
||||||
* Returns 0 if success, or errno: -EHOSTUNREACH
|
* Returns 0 if success, or errno: -EHOSTUNREACH
|
||||||
*/
|
*/
|
||||||
int tipc_sk_rcv(struct sk_buff *buf)
|
int tipc_sk_rcv(struct sk_buff *skb)
|
||||||
{
|
{
|
||||||
struct tipc_sock *tsk;
|
struct tipc_sock *tsk;
|
||||||
struct sock *sk;
|
struct sock *sk;
|
||||||
u32 dport = msg_destport(buf_msg(buf));
|
u32 dport = msg_destport(buf_msg(skb));
|
||||||
int rc = TIPC_OK;
|
int rc = TIPC_OK;
|
||||||
uint limit;
|
uint limit;
|
||||||
u32 dnode;
|
u32 dnode;
|
||||||
|
@ -1776,7 +1781,7 @@ int tipc_sk_rcv(struct sk_buff *buf)
|
||||||
/* Validate destination and message */
|
/* Validate destination and message */
|
||||||
tsk = tipc_sk_get(dport);
|
tsk = tipc_sk_get(dport);
|
||||||
if (unlikely(!tsk)) {
|
if (unlikely(!tsk)) {
|
||||||
rc = tipc_msg_eval(buf, &dnode);
|
rc = tipc_msg_eval(skb, &dnode);
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
sk = &tsk->sk;
|
sk = &tsk->sk;
|
||||||
|
@ -1785,12 +1790,12 @@ int tipc_sk_rcv(struct sk_buff *buf)
|
||||||
spin_lock_bh(&sk->sk_lock.slock);
|
spin_lock_bh(&sk->sk_lock.slock);
|
||||||
|
|
||||||
if (!sock_owned_by_user(sk)) {
|
if (!sock_owned_by_user(sk)) {
|
||||||
rc = filter_rcv(sk, buf);
|
rc = filter_rcv(sk, skb);
|
||||||
} else {
|
} else {
|
||||||
if (sk->sk_backlog.len == 0)
|
if (sk->sk_backlog.len == 0)
|
||||||
atomic_set(&tsk->dupl_rcvcnt, 0);
|
atomic_set(&tsk->dupl_rcvcnt, 0);
|
||||||
limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt);
|
limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt);
|
||||||
if (sk_add_backlog(sk, buf, limit))
|
if (sk_add_backlog(sk, skb, limit))
|
||||||
rc = -TIPC_ERR_OVERLOAD;
|
rc = -TIPC_ERR_OVERLOAD;
|
||||||
}
|
}
|
||||||
spin_unlock_bh(&sk->sk_lock.slock);
|
spin_unlock_bh(&sk->sk_lock.slock);
|
||||||
|
@ -1798,10 +1803,10 @@ int tipc_sk_rcv(struct sk_buff *buf)
|
||||||
if (likely(!rc))
|
if (likely(!rc))
|
||||||
return 0;
|
return 0;
|
||||||
exit:
|
exit:
|
||||||
if ((rc < 0) && !tipc_msg_reverse(buf, &dnode, -rc))
|
if ((rc < 0) && !tipc_msg_reverse(skb, &dnode, -rc))
|
||||||
return -EHOSTUNREACH;
|
return -EHOSTUNREACH;
|
||||||
|
|
||||||
tipc_link_xmit(buf, dnode, 0);
|
tipc_link_xmit_skb(skb, dnode, 0);
|
||||||
return (rc < 0) ? -EHOSTUNREACH : 0;
|
return (rc < 0) ? -EHOSTUNREACH : 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2059,7 +2064,7 @@ static int tipc_shutdown(struct socket *sock, int how)
|
||||||
{
|
{
|
||||||
struct sock *sk = sock->sk;
|
struct sock *sk = sock->sk;
|
||||||
struct tipc_sock *tsk = tipc_sk(sk);
|
struct tipc_sock *tsk = tipc_sk(sk);
|
||||||
struct sk_buff *buf;
|
struct sk_buff *skb;
|
||||||
u32 dnode;
|
u32 dnode;
|
||||||
int res;
|
int res;
|
||||||
|
|
||||||
|
@ -2074,23 +2079,23 @@ static int tipc_shutdown(struct socket *sock, int how)
|
||||||
|
|
||||||
restart:
|
restart:
|
||||||
/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
|
/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
|
||||||
buf = __skb_dequeue(&sk->sk_receive_queue);
|
skb = __skb_dequeue(&sk->sk_receive_queue);
|
||||||
if (buf) {
|
if (skb) {
|
||||||
if (TIPC_SKB_CB(buf)->handle != NULL) {
|
if (TIPC_SKB_CB(skb)->handle != NULL) {
|
||||||
kfree_skb(buf);
|
kfree_skb(skb);
|
||||||
goto restart;
|
goto restart;
|
||||||
}
|
}
|
||||||
if (tipc_msg_reverse(buf, &dnode, TIPC_CONN_SHUTDOWN))
|
if (tipc_msg_reverse(skb, &dnode, TIPC_CONN_SHUTDOWN))
|
||||||
tipc_link_xmit(buf, dnode, tsk->ref);
|
tipc_link_xmit_skb(skb, dnode, tsk->ref);
|
||||||
tipc_node_remove_conn(dnode, tsk->ref);
|
tipc_node_remove_conn(dnode, tsk->ref);
|
||||||
} else {
|
} else {
|
||||||
dnode = tsk_peer_node(tsk);
|
dnode = tsk_peer_node(tsk);
|
||||||
buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
|
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
|
||||||
TIPC_CONN_MSG, SHORT_H_SIZE,
|
TIPC_CONN_MSG, SHORT_H_SIZE,
|
||||||
0, dnode, tipc_own_addr,
|
0, dnode, tipc_own_addr,
|
||||||
tsk_peer_port(tsk),
|
tsk_peer_port(tsk),
|
||||||
tsk->ref, TIPC_CONN_SHUTDOWN);
|
tsk->ref, TIPC_CONN_SHUTDOWN);
|
||||||
tipc_link_xmit(buf, dnode, tsk->ref);
|
tipc_link_xmit_skb(skb, dnode, tsk->ref);
|
||||||
}
|
}
|
||||||
tsk->connected = 0;
|
tsk->connected = 0;
|
||||||
sock->state = SS_DISCONNECTING;
|
sock->state = SS_DISCONNECTING;
|
||||||
|
@ -2119,7 +2124,7 @@ static void tipc_sk_timeout(unsigned long ref)
|
||||||
{
|
{
|
||||||
struct tipc_sock *tsk;
|
struct tipc_sock *tsk;
|
||||||
struct sock *sk;
|
struct sock *sk;
|
||||||
struct sk_buff *buf = NULL;
|
struct sk_buff *skb = NULL;
|
||||||
u32 peer_port, peer_node;
|
u32 peer_port, peer_node;
|
||||||
|
|
||||||
tsk = tipc_sk_get(ref);
|
tsk = tipc_sk_get(ref);
|
||||||
|
@ -2137,20 +2142,20 @@ static void tipc_sk_timeout(unsigned long ref)
|
||||||
|
|
||||||
if (tsk->probing_state == TIPC_CONN_PROBING) {
|
if (tsk->probing_state == TIPC_CONN_PROBING) {
|
||||||
/* Previous probe not answered -> self abort */
|
/* Previous probe not answered -> self abort */
|
||||||
buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
|
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
|
||||||
SHORT_H_SIZE, 0, tipc_own_addr,
|
SHORT_H_SIZE, 0, tipc_own_addr,
|
||||||
peer_node, ref, peer_port,
|
peer_node, ref, peer_port,
|
||||||
TIPC_ERR_NO_PORT);
|
TIPC_ERR_NO_PORT);
|
||||||
} else {
|
} else {
|
||||||
buf = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
|
skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
|
||||||
0, peer_node, tipc_own_addr,
|
0, peer_node, tipc_own_addr,
|
||||||
peer_port, ref, TIPC_OK);
|
peer_port, ref, TIPC_OK);
|
||||||
tsk->probing_state = TIPC_CONN_PROBING;
|
tsk->probing_state = TIPC_CONN_PROBING;
|
||||||
k_start_timer(&tsk->timer, tsk->probing_interval);
|
k_start_timer(&tsk->timer, tsk->probing_interval);
|
||||||
}
|
}
|
||||||
bh_unlock_sock(sk);
|
bh_unlock_sock(sk);
|
||||||
if (buf)
|
if (skb)
|
||||||
tipc_link_xmit(buf, peer_node, ref);
|
tipc_link_xmit_skb(skb, peer_node, ref);
|
||||||
exit:
|
exit:
|
||||||
tipc_sk_put(tsk);
|
tipc_sk_put(tsk);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Reference in a new issue