Diffstat (limited to 'net/core')
| -rw-r--r-- | net/core/dev.c            | 64 |
| -rw-r--r-- | net/core/dst.c            | 10 |
| -rw-r--r-- | net/core/netpoll.c        | 99 |
| -rw-r--r-- | net/core/netprio_cgroup.c | 30 |
| -rw-r--r-- | net/core/pktgen.c         |  2 |
| -rw-r--r-- | net/core/scm.c            |  4 |
| -rw-r--r-- | net/core/skbuff.c         |  4 |
| -rw-r--r-- | net/core/sock.c           | 13 |
8 files changed, 150 insertions, 76 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 0cb3fe8d8e72..36c4a0cdb6c1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1055,6 +1055,8 @@ rollback:
  */
 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
 {
+	char *new_ifalias;
+
 	ASSERT_RTNL();
 
 	if (len >= IFALIASZ)
@@ -1068,9 +1070,10 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
 		return 0;
 	}
 
-	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
-	if (!dev->ifalias)
+	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
+	if (!new_ifalias)
 		return -ENOMEM;
+	dev->ifalias = new_ifalias;
 
 	strlcpy(dev->ifalias, alias, len+1);
 	return len;
@@ -1639,6 +1642,19 @@ static inline int deliver_skb(struct sk_buff *skb,
 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 }
 
+static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
+{
+	if (ptype->af_packet_priv == NULL)
+		return false;
+
+	if (ptype->id_match)
+		return ptype->id_match(ptype, skb->sk);
+	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
+		return true;
+
+	return false;
+}
+
 /*
  *	Support routine. Sends outgoing frames to any network
  *	taps currently in use.
@@ -1656,8 +1672,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 		 * they originated from - MvS (miquels@drinkel.ow.org)
 		 */
 		if ((ptype->dev == dev || !ptype->dev) &&
-		    (ptype->af_packet_priv == NULL ||
-		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
+		    (!skb_loop_sk(ptype, skb))) {
 			if (pt_prev) {
 				deliver_skb(skb2, pt_prev, skb->dev);
 				pt_prev = ptype;
@@ -2119,7 +2134,8 @@ static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
 static netdev_features_t harmonize_features(struct sk_buff *skb,
 	__be16 protocol, netdev_features_t features)
 {
-	if (!can_checksum_protocol(features, protocol)) {
+	if (skb->ip_summed != CHECKSUM_NONE &&
+	    !can_checksum_protocol(features, protocol)) {
 		features &= ~NETIF_F_ALL_CSUM;
 		features &= ~NETIF_F_SG;
 	} else if (illegal_highdma(skb->dev, skb)) {
@@ -2134,6 +2150,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 	__be16 protocol = skb->protocol;
 	netdev_features_t features = skb->dev->features;
 
+	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
+		features &= ~NETIF_F_GSO_MASK;
+
 	if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
@@ -2629,15 +2648,16 @@ void __skb_get_rxhash(struct sk_buff *skb)
 	if (!skb_flow_dissect(skb, &keys))
 		return;
 
-	if (keys.ports) {
-		if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
-			swap(keys.port16[0], keys.port16[1]);
+	if (keys.ports)
 		skb->l4_rxhash = 1;
-	}
 
 	/* get a consistent hash (same value on both flow directions) */
-	if ((__force u32)keys.dst < (__force u32)keys.src)
+	if (((__force u32)keys.dst < (__force u32)keys.src) ||
+	    (((__force u32)keys.dst == (__force u32)keys.src) &&
+	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
 		swap(keys.dst, keys.src);
+		swap(keys.port16[0], keys.port16[1]);
+	}
 
 	hash = jhash_3words((__force u32)keys.dst,
 			    (__force u32)keys.src,
@@ -3303,7 +3323,7 @@ ncls:
 
 	if (pt_prev) {
 		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
-			ret = -ENOMEM;
+			goto drop;
 		else
 			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 	} else {
@@ -5726,6 +5746,7 @@ EXPORT_SYMBOL(netdev_refcnt_read);
 
 /**
  * netdev_wait_allrefs - wait until all references are gone.
+ * @dev: target net_device
  *
  * This is called when unregistering network devices.
  *
@@ -5986,6 +6007,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 	dev_net_set(dev, &init_net);
 
 	dev->gso_max_size = GSO_MAX_SIZE;
+	dev->gso_max_segs = GSO_MAX_SEGS;
 
 	INIT_LIST_HEAD(&dev->napi_list);
 	INIT_LIST_HEAD(&dev->unreg_list);
@@ -6402,22 +6424,26 @@ const char *netdev_drivername(const struct net_device *dev)
 	return empty;
 }
 
-int __netdev_printk(const char *level, const struct net_device *dev,
+static int __netdev_printk(const char *level, const struct net_device *dev,
 			   struct va_format *vaf)
 {
 	int r;
 
-	if (dev && dev->dev.parent)
-		r = dev_printk(level, dev->dev.parent, "%s: %pV",
-			       netdev_name(dev), vaf);
-	else if (dev)
+	if (dev && dev->dev.parent) {
+		r = dev_printk_emit(level[1] - '0',
+				    dev->dev.parent,
+				    "%s %s %s: %pV",
+				    dev_driver_string(dev->dev.parent),
+				    dev_name(dev->dev.parent),
+				    netdev_name(dev), vaf);
+	} else if (dev) {
 		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
-	else
+	} else {
 		r = printk("%s(NULL net_device): %pV", level, vaf);
+	}
 
 	return r;
 }
-EXPORT_SYMBOL(__netdev_printk);
 
 int netdev_printk(const char *level, const struct net_device *dev,
 		  const char *format, ...)
@@ -6432,6 +6458,7 @@ int netdev_printk(const char *level, const struct net_device *dev,
 	vaf.va = &args;
 
 	r = __netdev_printk(level, dev, &vaf);
+
 	va_end(args);
 
 	return r;
@@ -6451,6 +6478,7 @@ int func(const struct net_device *dev, const char *fmt, ...)	\
 	vaf.va = &args;						\
 								\
 	r = __netdev_printk(level, dev, &vaf);			\
+								\
 	va_end(args);						\
 								\
 	return r;						\
diff --git a/net/core/dst.c b/net/core/dst.c
index 069d51d29414..56d63612e1e4 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -149,7 +149,15 @@ int dst_discard(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dst_discard);
 
-const u32 dst_default_metrics[RTAX_MAX];
+const u32 dst_default_metrics[RTAX_MAX + 1] = {
+	/* This initializer is needed to force linker to place this variable
+	 * into const section. Otherwise it might end into bss section.
+	 * We really want to avoid false sharing on this variable, and catch
+	 * any writes on it.
+	 */
+	[RTAX_MAX] = 0xdeadbeef,
+};
+
 
 void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
 		int initial_ref, int initial_obsolete, unsigned short flags)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b4c90e42b443..e4ba3e70c174 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -26,6 +26,7 @@
 #include <linux/workqueue.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/if_vlan.h>
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <asm/unaligned.h>
@@ -54,7 +55,7 @@ static atomic_t trapped;
 	 MAX_UDP_CHUNK)
 
 static void zap_completion_queue(void);
-static void arp_reply(struct sk_buff *skb);
+static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
 
 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
@@ -170,7 +171,8 @@ static void poll_napi(struct net_device *dev)
 	list_for_each_entry(napi, &dev->napi_list, dev_list) {
 		if (napi->poll_owner != smp_processor_id() &&
 		    spin_trylock(&napi->poll_lock)) {
-			budget = poll_one_napi(dev->npinfo, napi, budget);
+			budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
+					       napi, budget);
 			spin_unlock(&napi->poll_lock);
 
 			if (!budget)
@@ -185,13 +187,14 @@ static void service_arp_queue(struct netpoll_info *npi)
 		struct sk_buff *skb;
 
 		while ((skb = skb_dequeue(&npi->arp_tx)))
-			arp_reply(skb);
+			netpoll_arp_reply(skb, npi);
 	}
 }
 
 static void netpoll_poll_dev(struct net_device *dev)
 {
 	const struct net_device_ops *ops;
+	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
 
 	if (!dev || !netif_running(dev))
 		return;
@@ -206,17 +209,18 @@ static void netpoll_poll_dev(struct net_device *dev)
 	poll_napi(dev);
 
 	if (dev->flags & IFF_SLAVE) {
-		if (dev->npinfo) {
+		if (ni) {
 			struct net_device *bond_dev = dev->master;
 			struct sk_buff *skb;
-			while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
+			struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo);
+			while ((skb = skb_dequeue(&ni->arp_tx))) {
 				skb->dev = bond_dev;
-				skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
+				skb_queue_tail(&bond_ni->arp_tx, skb);
 			}
 		}
 	}
 
-	service_arp_queue(dev->npinfo);
+	service_arp_queue(ni);
 
 	zap_completion_queue();
 }
@@ -302,6 +306,7 @@ static int netpoll_owner_active(struct net_device *dev)
 	return 0;
 }
 
+/* call with IRQ disabled */
 void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 			     struct net_device *dev)
 {
@@ -309,8 +314,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 	unsigned long tries;
 	const struct net_device_ops *ops = dev->netdev_ops;
 	/* It is up to the caller to keep npinfo alive. */
-	struct netpoll_info *npinfo = np->dev->npinfo;
+	struct netpoll_info *npinfo;
+
+	WARN_ON_ONCE(!irqs_disabled());
 
+	npinfo = rcu_dereference_bh(np->dev->npinfo);
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
 		__kfree_skb(skb);
 		return;
@@ -319,16 +327,22 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 	/* don't get messages out of order, and no recursion */
 	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
 		struct netdev_queue *txq;
-		unsigned long flags;
 
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
-		local_irq_save(flags);
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 		     tries > 0; --tries) {
 			if (__netif_tx_trylock(txq)) {
 				if (!netif_xmit_stopped(txq)) {
+					if (vlan_tx_tag_present(skb) &&
+					    !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) {
+						skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
+						if (unlikely(!skb))
+							break;
+						skb->vlan_tci = 0;
+					}
+
 					status = ops->ndo_start_xmit(skb, dev);
 					if (status == NETDEV_TX_OK)
 						txq_trans_update(txq);
@@ -347,10 +361,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 		}
 
 		WARN_ONCE(!irqs_disabled(),
-			"netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
+			"netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
 			dev->name, ops->ndo_start_xmit);
 
-		local_irq_restore(flags);
 	}
 
 	if (status != NETDEV_TX_OK) {
@@ -423,9 +436,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 }
 EXPORT_SYMBOL(netpoll_send_udp);
 
-static void arp_reply(struct sk_buff *skb)
+static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
 {
-	struct netpoll_info *npinfo = skb->dev->npinfo;
 	struct arphdr *arp;
 	unsigned char *arp_ptr;
 	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
@@ -543,13 +555,12 @@ static void arp_reply(struct sk_buff *skb)
 	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 }
 
-int __netpoll_rx(struct sk_buff *skb)
+int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
 {
 	int proto, len, ulen;
 	int hits = 0;
 	const struct iphdr *iph;
 	struct udphdr *uh;
-	struct netpoll_info *npinfo = skb->dev->npinfo;
 	struct netpoll *np, *tmp;
 
 	if (list_empty(&npinfo->rx_np))
@@ -565,6 +576,12 @@ int __netpoll_rx(struct sk_buff *skb)
 		return 1;
 	}
 
+	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
+		skb = vlan_untag(skb);
+		if (unlikely(!skb))
+			goto out;
+	}
+
 	proto = ntohs(eth_hdr(skb)->h_proto);
 	if (proto != ETH_P_IP)
 		goto out;
@@ -715,7 +732,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 }
 EXPORT_SYMBOL(netpoll_parse_options);
 
-int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 {
 	struct netpoll_info *npinfo;
 	const struct net_device_ops *ops;
@@ -734,7 +751,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 	}
 
 	if (!ndev->npinfo) {
-		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
+		npinfo = kmalloc(sizeof(*npinfo), gfp);
 		if (!npinfo) {
 			err = -ENOMEM;
 			goto out;
@@ -752,7 +769,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 
 		ops = np->dev->netdev_ops;
 		if (ops->ndo_netpoll_setup) {
-			err = ops->ndo_netpoll_setup(ndev, npinfo);
+			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
 			if (err)
 				goto free_npinfo;
 		}
@@ -857,7 +874,7 @@ int netpoll_setup(struct netpoll *np)
 	refill_skbs();
 
 	rtnl_lock();
-	err = __netpoll_setup(np, ndev);
+	err = __netpoll_setup(np, ndev, GFP_KERNEL);
 	rtnl_unlock();
 
 	if (err)
@@ -878,6 +895,24 @@ static int __init netpoll_init(void)
 }
 core_initcall(netpoll_init);
 
+static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
+{
+	struct netpoll_info *npinfo =
+			container_of(rcu_head, struct netpoll_info, rcu);
+
+	skb_queue_purge(&npinfo->arp_tx);
+	skb_queue_purge(&npinfo->txq);
+
+	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
+	cancel_delayed_work(&npinfo->tx_work);
+
+	/* clean after last, unfinished work */
+	__skb_queue_purge(&npinfo->txq);
+	/* now cancel it again */
+	cancel_delayed_work(&npinfo->tx_work);
+	kfree(npinfo);
+}
+
 void __netpoll_cleanup(struct netpoll *np)
 {
 	struct netpoll_info *npinfo;
@@ -903,20 +938,24 @@ void __netpoll_cleanup(struct netpoll *np)
 			ops->ndo_netpoll_cleanup(np->dev);
 
 		RCU_INIT_POINTER(np->dev->npinfo, NULL);
+		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
+	}
+}
+EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
-		/* avoid racing with NAPI reading npinfo */
-		synchronize_rcu_bh();
+static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
+{
+	struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);
 
-		skb_queue_purge(&npinfo->arp_tx);
-		skb_queue_purge(&npinfo->txq);
-		cancel_delayed_work_sync(&npinfo->tx_work);
+	__netpoll_cleanup(np);
+	kfree(np);
+}
 
-		/* clean after last, unfinished work */
-		__skb_queue_purge(&npinfo->txq);
-		kfree(npinfo);
-	}
+void __netpoll_free_rcu(struct netpoll *np)
+{
+	call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
 }
-EXPORT_SYMBOL_GPL(__netpoll_cleanup);
+EXPORT_SYMBOL_GPL(__netpoll_free_rcu);
 
 void netpoll_cleanup(struct netpoll *np)
 {
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index ed0c0431fcd8..c75e3f9d060f 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -101,12 +101,10 @@ static int write_update_netdev_table(struct net_device *dev)
 	u32 max_len;
 	struct netprio_map *map;
 
-	rtnl_lock();
 	max_len = atomic_read(&max_prioidx) + 1;
 	map = rtnl_dereference(dev->priomap);
 	if (!map || map->priomap_len < max_len)
 		ret = extend_netdev_table(dev, max_len);
-	rtnl_unlock();
 
 	return ret;
 }
@@ -256,17 +254,17 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
 	if (!dev)
 		goto out_free_devname;
 
+	rtnl_lock();
 	ret = write_update_netdev_table(dev);
 	if (ret < 0)
 		goto out_put_dev;
 
-	rcu_read_lock();
-	map = rcu_dereference(dev->priomap);
+	map = rtnl_dereference(dev->priomap);
 	if (map)
 		map->priomap[prioidx] = priority;
-	rcu_read_unlock();
 
 out_put_dev:
+	rtnl_unlock();
 	dev_put(dev);
 
 out_free_devname:
@@ -277,12 +275,6 @@ out_free_devname:
 void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
 	struct task_struct *p;
-	char *tmp = kzalloc(sizeof(char) * PATH_MAX, GFP_KERNEL);
-
-	if (!tmp) {
-		pr_warn("Unable to attach cgrp due to alloc failure!\n");
-		return;
-	}
 
 	cgroup_taskset_for_each(p, cgrp, tset) {
 		unsigned int fd;
@@ -296,32 +288,24 @@ void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 			continue;
 		}
 
-		rcu_read_lock();
+		spin_lock(&files->file_lock);
 		fdt = files_fdtable(files);
 		for (fd = 0; fd < fdt->max_fds; fd++) {
-			char *path;
 			struct file *file;
 			struct socket *sock;
-			unsigned long s;
-			int rv, err = 0;
+			int err;
 
 			file = fcheck_files(files, fd);
 			if (!file)
 				continue;
 
-			path = d_path(&file->f_path, tmp, PAGE_SIZE);
-			rv = sscanf(path, "socket:[%lu]", &s);
-			if (rv <= 0)
-				continue;
-
 			sock = sock_from_file(file, &err);
-			if (!err)
+			if (sock)
 				sock_update_netprioidx(sock->sk, p);
 		}
-		rcu_read_unlock();
+		spin_unlock(&files->file_lock);
 		task_unlock(p);
 	}
-	kfree(tmp);
 }
 
 static struct cftype ss_files[] = {
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index cce9e53528b1..148e73d2c451 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2721,7 +2721,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	/* Eth + IPh + UDPh + mpls */
 	datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 -
 		  pkt_dev->pkt_overhead;
-	if (datalen < sizeof(struct pktgen_hdr))
+	if (datalen < 0 || datalen < sizeof(struct pktgen_hdr))
 		datalen = sizeof(struct pktgen_hdr);
 
 	udph->source = htons(pkt_dev->cur_udp_src);
diff --git a/net/core/scm.c b/net/core/scm.c
index 8f6ccfd68ef4..040cebeed45b 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -265,6 +265,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
 	for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
 	     i++, cmfptr++)
 	{
+		struct socket *sock;
 		int new_fd;
 		err = security_file_receive(fp[i]);
 		if (err)
@@ -281,6 +282,9 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
 		}
 		/* Bump the usage count and install the file. */
 		get_file(fp[i]);
+		sock = sock_from_file(fp[i], &err);
+		if (sock)
+			sock_update_netprioidx(sock->sk, current);
 		fd_install(new_fd, fp[i]);
 	}
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index fe00d1208167..e33ebae519c8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3502,7 +3502,9 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
 	if (!skb_cloned(from))
 		skb_shinfo(from)->nr_frags = 0;
 
-	/* if the skb is cloned this does nothing since we set nr_frags to 0 */
+	/* if the skb is not cloned this does nothing
+	 * since we set nr_frags to 0.
+	 */
 	for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
 		skb_frag_ref(from, i);
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 6b654b3ddfda..a6000fbad294 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -691,7 +691,8 @@ set_rcvbuf:
 
 	case SO_KEEPALIVE:
 #ifdef CONFIG_INET
-		if (sk->sk_protocol == IPPROTO_TCP)
+		if (sk->sk_protocol == IPPROTO_TCP &&
+		    sk->sk_type == SOCK_STREAM)
 			tcp_set_keepalive(sk, valbool);
 #endif
 		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
@@ -1458,6 +1459,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 		} else {
 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 			sk->sk_gso_max_size = dst->dev->gso_max_size;
+			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
 		}
 	}
 }
@@ -1522,7 +1524,14 @@ EXPORT_SYMBOL(sock_rfree);
 
 void sock_edemux(struct sk_buff *skb)
 {
-	sock_put(skb->sk);
+	struct sock *sk = skb->sk;
+
+#ifdef CONFIG_INET
+	if (sk->sk_state == TCP_TIME_WAIT)
+		inet_twsk_put(inet_twsk(sk));
+	else
+#endif
+		sock_put(sk);
 }
 EXPORT_SYMBOL(sock_edemux);
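The dev_set_alias() hunk above fixes a classic realloc-family pitfall: assigning krealloc()'s result directly to the only pointer holding the buffer means a failed allocation overwrites it with NULL, leaking the old block and losing the existing alias. Below is a minimal userspace sketch of the same pattern using libc realloc() instead of krealloc(); struct device_like and set_alias() are illustrative stand-ins, not kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct device_like {
	char *ifalias;
};

static int set_alias(struct device_like *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	/* Buggy form: dev->ifalias = realloc(dev->ifalias, len + 1);
	 * On failure realloc() returns NULL but leaves the old block
	 * allocated, so the assignment leaks it and clears the alias.
	 * Storing into a temporary first keeps the old buffer intact. */
	new_ifalias = realloc(dev->ifalias, len + 1);
	if (!new_ifalias)
		return -1;
	dev->ifalias = new_ifalias;

	memcpy(dev->ifalias, alias, len);
	dev->ifalias[len] = '\0';
	return (int)len;
}

int main(void)
{
	struct device_like dev = { .ifalias = NULL };

	if (set_alias(&dev, "uplink0", strlen("uplink0")) < 0)
		return 1;
	printf("alias: %s\n", dev.ifalias);
	free(dev.ifalias);
	return 0;
}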
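The __skb_get_rxhash() hunk changes when the address and port pairs are swapped so that both directions of a flow canonicalize to the same tuple before hashing (including the tie case where source and destination addresses are equal). A minimal sketch of that ordering idea, with a trivial mixing function standing in for the kernel's jhash_3words():

#include <stdint.h>
#include <stdio.h>

/* stand-in mixer, NOT the kernel's jhash_3words() */
static uint32_t mix(uint32_t a, uint32_t b, uint32_t c)
{
	a ^= b * 2654435761u;
	a ^= c * 2246822519u;
	a ^= a >> 16;
	return a;
}

static uint32_t flow_hash(uint32_t src, uint32_t dst,
			  uint16_t sport, uint16_t dport)
{
	/* canonical order: swap both pairs together when dst < src,
	 * or when the addresses tie and dport < sport, so that A->B
	 * and B->A reduce to the same tuple before hashing */
	if (dst < src || (dst == src && dport < sport)) {
		uint32_t ta = src; src = dst; dst = ta;
		uint16_t tp = sport; sport = dport; dport = tp;
	}
	return mix(dst, src, ((uint32_t)sport << 16) | dport);
}

int main(void)
{
	uint32_t h1 = flow_hash(0x0a000001, 0x0a000002, 1234, 80);
	uint32_t h2 = flow_hash(0x0a000002, 0x0a000001, 80, 1234);

	printf("%08x %08x -> %s\n", (unsigned)h1, (unsigned)h2,
	       h1 == h2 ? "same" : "differ");
	return 0;
}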
