Android socket creation and bind flow analysis (Part 2)

Monday, August 1, 2011, 22:11

Let's look at the structure of packet_ops:

static const struct proto_ops packet_ops = {
       .family =     PF_PACKET,
       .owner =      THIS_MODULE,
       .release =    packet_release,
       .bind =       packet_bind,
       .connect =    sock_no_connect,
       .socketpair = sock_no_socketpair,
       .accept =     sock_no_accept,
       .getname =    packet_getname,
       .poll =       packet_poll,
       .ioctl =      packet_ioctl,
       .listen =     sock_no_listen,
       .shutdown =   sock_no_shutdown,
       .setsockopt = packet_setsockopt,
       .getsockopt = packet_getsockopt,
       .sendmsg =    packet_sendmsg,
       .recvmsg =    packet_recvmsg,
       .mmap =       packet_mmap,
       .sendpage =   sock_no_sendpage,
};

The system call ultimately dispatches to the operation functions of the corresponding protocol family.
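As a quick user-space illustration (a minimal sketch, not code from the article; requires root/CAP_NET_RAW): creating a PF_PACKET raw socket is what makes the kernel attach packet_ops to the new socket, so every later bind()/sendto()/recvfrom() on that descriptor is dispatched through the table above:

/* Sketch: open a raw PF_PACKET socket from user space.
 * After this call the kernel-side socket's ops point at packet_ops. */
#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>        /* htons */
#include <linux/if_ether.h>   /* ETH_P_ALL */

int main(void)
{
        int fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
        if (fd < 0) {
                perror("socket(PF_PACKET)");
                return 1;
        }
        /* fd is now served by packet_bind, packet_sendmsg, ... */
        return 0;
}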

 

Socket Bind

bind() also enters the kernel through a system call; following the analysis above, it eventually reaches packet_bind():

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
       ......

       if (addr_len < sizeof(struct sockaddr_ll))
              return -EINVAL;
       if (sll->sll_family != AF_PACKET)
              return -EINVAL;

       if (sll->sll_ifindex) {
              err = -ENODEV;
              /* find the corresponding device by ifindex */
              dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
              if (dev == NULL)
                     goto out;
       }
       err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);

       ......
}
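For reference, the user-space bind() call that drives this path fills in exactly the fields packet_bind() checks: sll_family, sll_ifindex and sll_protocol. A minimal sketch, assuming the socket from the previous example and an interface name of "eth0" (both placeholders):

/* Sketch: bind the packet socket to one interface. */
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>            /* if_nametoindex */
#include <linux/if_packet.h>   /* struct sockaddr_ll */
#include <linux/if_ether.h>    /* ETH_P_ALL */

int bind_packet_socket(int fd)
{
        struct sockaddr_ll sll;

        memset(&sll, 0, sizeof(sll));
        sll.sll_family   = AF_PACKET;              /* checked in packet_bind() */
        sll.sll_protocol = htons(ETH_P_ALL);       /* becomes po->num / prot_hook.type */
        sll.sll_ifindex  = if_nametoindex("eth0"); /* looked up via dev_get_by_index() */

        return bind(fd, (struct sockaddr *)&sll, sizeof(sll));
}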

 

static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
{
       struct packet_sock *po = pkt_sk(sk);
       /*
        *    Detach an existing hook if present.
        */

       lock_sock(sk);

       /* the protocol hook was already registered with the stack when the
        * socket was created, so it is detached here first. (Odd: at least
        * when I read packet_create it had already been added.) */
       spin_lock(&po->bind_lock);
       if (po->running) {
              __sock_put(sk);
              po->running = 0;
              po->num = 0;
              spin_unlock(&po->bind_lock);
              dev_remove_pack(&po->prot_hook);
              spin_lock(&po->bind_lock);
       }
       po->num = protocol;
       po->prot_hook.type = protocol;
       po->prot_hook.dev = dev;

       po->ifindex = dev ? dev->ifindex : 0;

       if (protocol == 0)
              goto out_unlock;

       if (!dev || (dev->flags & IFF_UP)) {  /* did the programmer make a mistake here?! dev ?? */
              dev_add_pack(&po->prot_hook);
              sock_hold(sk);
              po->running = 1;
       } else {
              sk->sk_err = ENETDOWN;
              if (!sock_flag(sk, SOCK_DEAD))
                     sk->sk_error_report(sk);
       }

       ......
}

 

void dev_add_pack(struct packet_type *pt)
{
       int hash;

       spin_lock_bh(&ptype_lock);
       if (pt->type == htons(ETH_P_ALL))
              list_add_rcu(&pt->list, &ptype_all);
       else {
              hash = ntohs(pt->type) & PTYPE_HASH_MASK;
              /* ptype_base: the network stack's hash of list heads; on receive
               * the packet type is used to look up the matching ptype */
              list_add_rcu(&pt->list, &ptype_base[hash]);
       }
       spin_unlock_bh(&ptype_lock);
}
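dev_add_pack() is not reserved for packet sockets: any kernel code can hook a packet type onto ptype_all/ptype_base this way. A minimal kernel-module sketch, where the module and handler names are purely illustrative:

/* Sketch: register our own packet_type hook with dev_add_pack(). */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static int demo_rcv(struct sk_buff *skb, struct net_device *dev,
                    struct packet_type *pt, struct net_device *orig_dev)
{
        /* called from netif_receive_skb()/deliver_skb() for every frame */
        printk(KERN_DEBUG "demo: %u bytes on %s\n", skb->len, dev->name);
        kfree_skb(skb);        /* the handler owns this reference */
        return 0;
}

static struct packet_type demo_ptype = {
        .type = cpu_to_be16(ETH_P_ALL),  /* goes onto ptype_all, as in dev_add_pack() above */
        .func = demo_rcv,
};

static int __init demo_init(void)
{
        dev_add_pack(&demo_ptype);
        return 0;
}

static void __exit demo_exit(void)
{
        dev_remove_pack(&demo_ptype);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");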

 

Send:

The call sequence when an application sends data through a socket:

sendto → sys_sendto → sock_sendmsg → __sock_sendmsg → sendmsg → packet_sendmsg → dev_queue_xmit → dev_hard_start_xmit

 

For the concrete implementation of dev_hard_start_xmit, look at any wireless NIC driver; its job is to write the data directly into the hardware buffer and transmit it.

The send path is fairly simple; the code is left for the reader to analyze.
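Still, a small user-space sketch makes the trigger of that chain concrete (the interface name and frame contents are placeholders, not values from the article):

/* Sketch: send one raw Ethernet frame; this is what kicks off
 * sendto -> sys_sendto -> ... -> packet_sendmsg -> dev_queue_xmit. */
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

int send_frame(int fd, const unsigned char dst_mac[ETH_ALEN],
               const void *frame, size_t len)
{
        struct sockaddr_ll sll;

        memset(&sll, 0, sizeof(sll));
        sll.sll_family  = AF_PACKET;
        sll.sll_ifindex = if_nametoindex("eth0");  /* placeholder interface */
        sll.sll_halen   = ETH_ALEN;
        memcpy(sll.sll_addr, dst_mac, ETH_ALEN);

        /* for SOCK_RAW the buffer must already contain a full Ethernet header */
        return sendto(fd, frame, len, 0, (struct sockaddr *)&sll, sizeof(sll));
}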

 

How network data is delivered to the application layer through the socket:

netif_rx → napi_schedule → __napi_schedule → __raise_softirq_irqoff(NET_RX_SOFTIRQ) → net_rx_action → process_backlog → netif_receive_skb → deliver_skb → packet_rcv → sock_def_readable → wake up the sleeping process, which then reads the data

 

When socket data is read, the file system layer calls sock_aio_read, which eventually reaches the socket's protocol handler packet_recvmsg. This code is also left for the reader to analyze. The path is:

read → sock_aio_read → do_sock_read → recvmsg → packet_recvmsg
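From user space, this read path is driven by an ordinary recvfrom()/read() on the same descriptor; a minimal sketch:

/* Sketch: block until packet_recvmsg hands us one frame. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/if_ether.h>   /* ETH_FRAME_LEN */

void dump_one_frame(int fd)
{
        unsigned char buf[ETH_FRAME_LEN];
        ssize_t n = recvfrom(fd, buf, sizeof(buf), 0, NULL, NULL);

        if (n > 0)
                printf("received %zd bytes\n", n);
}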

 

Now let's analyze the code in detail to see how network data reaches the application layer through the socket.

After the driver receives data it calls netif_rx to notify the upper layers. The netif_rx code is as follows:

int netif_rx(struct sk_buff *skb)
{
       struct softnet_data *queue;
       unsigned long flags;

       /* if netpoll wants it, pretend we never saw it;
        * see netconsole.c for an example of a netpoll client */
       if (netpoll_rx(skb))
              return NET_RX_DROP;

       /* add a receive timestamp */
       if (!skb->tstamp.tv64)
              net_timestamp(skb);

       /*
        * The code is rearranged so that the path is the most
        * short when CPU is congested, but is still operating.
        */
       local_irq_save(flags);
       queue = &__get_cpu_var(softnet_data); /* this CPU's private softnet data */

       __get_cpu_var(netdev_rx_stat).total++;
       if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
              /* if the input queue is non-empty just append the skb;
               * otherwise schedule NAPI first, then enqueue */
              if (queue->input_pkt_queue.qlen) {
enqueue:
                     __skb_queue_tail(&queue->input_pkt_queue, skb);
                     local_irq_restore(flags);
                     return NET_RX_SUCCESS;
              }

              /* schedule the backlog NAPI instance */
              napi_schedule(&queue->backlog);
              goto enqueue;
       }

       __get_cpu_var(netdev_rx_stat).dropped++;
       local_irq_restore(flags);

       kfree_skb(skb);
       return NET_RX_DROP;
}
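netif_rx() is typically called by simple, non-NAPI drivers from their receive interrupt handler. A hedged sketch of such a handler (the names are illustrative, not from any particular driver):

/* Sketch: hand a frame from a hardware buffer to the stack,
 * which then runs the netif_rx() path shown above. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

static void demo_receive(struct net_device *dev, const void *data, unsigned int len)
{
        struct sk_buff *skb;

        skb = dev_alloc_skb(len + NET_IP_ALIGN);
        if (!skb) {
                dev->stats.rx_dropped++;
                return;
        }
        skb_reserve(skb, NET_IP_ALIGN);           /* align the IP header */
        memcpy(skb_put(skb, len), data, len);     /* copy out of the hardware buffer */
        skb->protocol = eth_type_trans(skb, dev); /* sets skb->dev, strips the eth header */

        netif_rx(skb);                            /* enqueue on this CPU's input_pkt_queue */
}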

 

static inline void napi_schedule(struct napi_struct *n)
{
       /* check whether this NAPI instance is already scheduled,
        * and mark it as scheduled */
       if (napi_schedule_prep(n))
              __napi_schedule(n);
}

 

void __napi_schedule(struct napi_struct *n)
{
       unsigned long flags;

       local_irq_save(flags);
       /* add this napi_struct to the CPU's poll_list */
       list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
       /* raise the RX softirq; its handler was registered in net_dev_init()
        * via open_softirq() */
       __raise_softirq_irqoff(NET_RX_SOFTIRQ);
       local_irq_restore(flags);
}
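A NAPI-capable driver reaches __napi_schedule() with its own napi_struct instead of the per-CPU backlog. A hedged sketch of that pattern (the driver structure and names are assumptions, not from a real driver):

/* Sketch: the usual NAPI pattern around napi_schedule()/__napi_schedule(). */
#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct demo_priv {
        struct napi_struct napi;
};

/* poll callback, later invoked from net_rx_action() in softirq context */
static int demo_poll(struct napi_struct *napi, int budget)
{
        int done = 0;

        /* ... pull up to 'budget' frames from the RX ring and pass
         *     each one to netif_receive_skb() ... */

        if (done < budget) {
                napi_complete(napi);   /* no more work: clear the SCHED bit */
                /* ... re-enable RX interrupts on the NIC ... */
        }
        return done;
}

static irqreturn_t demo_irq(int irq, void *dev_id)
{
        struct demo_priv *priv = dev_id;

        /* ... mask RX interrupts on the NIC ... */
        if (napi_schedule_prep(&priv->napi))
                __napi_schedule(&priv->napi); /* adds to poll_list, raises NET_RX_SOFTIRQ */
        return IRQ_HANDLED;
}

/* during device setup:
 *     netif_napi_add(dev, &priv->napi, demo_poll, 64);
 */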