/*
 * It essentially blocks any incoming packets, so that we won't get
 * any new data or any packets that change the state of the socket.
 *
 * Note the 'barrier()' calls: gcc may not move a lock "downwards"
 * or an unlock "upwards" when optimizing.
 */
extern void __release_sock(struct sock *sk);

static inline void lock_sock(struct sock *sk)
{
#if 0
/* debugging code: the test isn't even 100% correct, but it can catch bugs */
/* Note that a double lock is ok in theory - it's just _usually_ a bug */
        if (sk->users) {
                __label__ here;
                printk("double lock on socket at %p\n", &&here);
here:
        }
#endif
        sk->users++;
        barrier();
}

static inline void release_sock(struct sock *sk)
{
        barrier();
#if 0
/* debugging code: remove me when ok */
        if (sk->users == 0) {
                __label__ here;
                sk->users = 1;
                printk("trying to unlock unlocked socket at %p\n", &&here);
here:
        }
#endif
        if ((sk->users = sk->users-1) == 0)
                __release_sock(sk);
}

extern struct sock *sk_alloc(int priority);
extern void sk_free(struct sock *sk);
extern void destroy_sock(struct sock *sk);
extern unsigned short get_new_socknum(struct proto *, unsigned short);
extern void put_sock(unsigned short, struct sock *);
extern struct sock *get_sock(struct proto *, unsigned short,
                             unsigned long, unsigned short,
                             unsigned long,
                             unsigned long, unsigned short);
extern struct sock *get_sock_mcast(struct sock *, unsigned short,
                                   unsigned long, unsigned short,
                                   unsigned long);
extern struct sock *get_sock_raw(struct sock *, unsigned short,
                                 unsigned long, unsigned long);
extern struct sk_buff *sock_wmalloc(struct sock *sk,
                                    unsigned long size, int force,
                                    int priority);
extern struct sk_buff *sock_rmalloc(struct sock *sk,
                                    unsigned long size, int force,
                                    int priority);
extern void sock_wfree(struct sock *sk, struct sk_buff *skb);
extern void sock_rfree(struct sock *sk, struct sk_buff *skb);
extern unsigned long sock_rspace(struct sock *sk);
extern unsigned long sock_wspace(struct sock *sk);
extern int sock_setsockopt(struct sock *sk, int level,
                           int op, char *optval, int optlen);
extern int sock_getsockopt(struct sock *sk, int level,
                           int op, char *optval, int *optlen);
extern struct sk_buff *sock_alloc_send_skb(struct sock *skb,
                                           unsigned long size,
                                           unsigned long fallback,
                                           int noblock,
                                           int *errcode);

/*
 * Queue a received datagram if it will fit. Stream and sequenced
 * protocols can't normally use this as they need to fit buffers in
 * and play with them.
 *
 * Inlined as it's very short and called for pretty much every
 * packet ever received.
 */
extern __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        if (sk->rmem_alloc + skb->truesize >= sk->rcvbuf)
                return -ENOMEM;
        atomic_add(skb->truesize, &sk->rmem_alloc);
        skb->sk = sk;
        skb_queue_tail(&sk->receive_queue, skb);
        if (!sk->dead)
                sk->data_ready(sk, skb->len);
        return 0;
}

extern __inline__ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        if (sk->rmem_alloc + skb->truesize >= sk->rcvbuf)
                return -ENOMEM;
        atomic_add(skb->truesize, &sk->rmem_alloc);
        skb->sk = sk;
        __skb_queue_tail(&sk->receive_queue, skb);
        if (!sk->dead)
                sk->data_ready(sk, skb->len);
        return 0;
}

/*
 * Recover an error report and clear atomically
 */
extern __inline__ int sock_error(struct sock *sk)
{
        int err = xchg(&sk->err, 0);
        return -err;
}

/*
 * Declarations from timer.c
 */
extern struct sock *timer_base;

extern void delete_timer(struct sock *);
extern void reset_timer(struct sock *, int, unsigned long);
extern void net_timer(unsigned long);

/*
 * Enable debug/info messages
 */
#define NETDEBUG(x) do { } while (0)

#endif /* _SOCK_H */
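/*
 * Illustrative sketch only, not part of sock.h: a hypothetical protocol
 * input routine showing how the helpers declared above fit together.
 * lock_sock() and release_sock() bracket the socket state change; when
 * the last user drops the lock, release_sock() calls __release_sock()
 * to process work deferred while the socket was locked.
 * sock_queue_rcv_skb() refuses the buffer with -ENOMEM when
 * sk->rmem_alloc + skb->truesize would reach sk->rcvbuf, in which case
 * the caller still owns the skb and must free it.  The function name
 * below is an assumption made for this example.
 */
static int example_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
        int err;

        lock_sock(sk);                    /* sk->users++, with barrier()      */
        err = sock_queue_rcv_skb(sk, skb);
        release_sock(sk);                 /* last user: runs __release_sock() */
        return err;                       /* 0 on success, -ENOMEM when full  */
}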