Quagga source code analysis: zebra, the grand steward
Posted by 丹西
zebra translates to 斑马 in Chinese, so I put on Song Dongye's《斑马,斑马》("Zebra, Zebra") as the BGM for writing this chapter. Hehe, a small indulgence!
Let's playfully crown zebra the grand steward (大内总管) of the quagga project.
It manages the route updates from, and the exchanges between, all the other protocol daemons, and it is the one process that talks to the kernel, as the architecture below shows:
+----+  +----+  +-----+  +-----+
|bgpd|  |ripd|  |ospfd|  |zebra|
+----+  +----+  +-----+  +-----+
                            |
+---------------------------|--+
|                           v  |
|  UNIX Kernel routing table   |
|                              |
+------------------------------+
Enough introduction; let's look at the code:
1. zebra as the server for the other protocol daemons:
/* Make zebra server socket, wiping any existing one (see bug #403). */
void
zebra_zserv_socket_init (char *path)
{
#ifdef HAVE_TCP_ZEBRA
  zebra_serv ();
#else
  zebra_serv_un (path ? path : ZEBRA_SERV_PATH);
#endif /* HAVE_TCP_ZEBRA */
}
zebra binds to the loopback address on port 2600 (ZEBRA_PORT), starts listening on the socket, and registers the listening socket with the thread scheduler as a ZEBRA_SERV event so it can receive routing information from clients:
accept_sock = socket (AF_INET, SOCK_STREAM, 0);

addr.sin_family = AF_INET;
addr.sin_port = htons (ZEBRA_PORT);
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
addr.sin_len = sizeof (struct sockaddr_in);
#endif /* HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
addr.sin_addr.s_addr = htonl (INADDR_LOOPBACK);

ret = bind (accept_sock, (struct sockaddr *) &addr, sizeof (struct sockaddr_in));
ret = listen (accept_sock, 1);

zebra_event (ZEBRA_SERV, accept_sock, NULL);
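What does zebra_event do with ZEBRA_SERV? It is a thin dispatcher over the thread library. Here is a hedged sketch of the relevant cases in zserv.c, reconstructed from memory, so details may differ between versions:

/* Sketch: register socket events with the thread scheduler.
   ZEBRA_SERV arms the accept handler on the listening socket;
   ZEBRA_READ (re-)arms the per-client read handler. */
static void
zebra_event (enum event event, int sock, struct zserv *client)
{
  switch (event)
    {
    case ZEBRA_SERV:
      thread_add_read (zebrad.master, zebra_accept, client, sock);
      break;
    case ZEBRA_READ:
      client->t_read =
        thread_add_read (zebrad.master, zebra_client_read, client, sock);
      break;
    case ZEBRA_WRITE:
      /* ... flush buffered output back to the client ... */
      break;
    }
}

zebra_accept then accepts the connection, allocates a struct zserv for the client, and schedules ZEBRA_READ on the new socket.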
2. The client side (isisd, for example):
2.1 Create the client and, during initialization, register it with the thread event scheduler.
void
isis_zebra_init (struct thread_master *master)
{
  zclient = zclient_new (master);
  zclient_init (zclient, ZEBRA_ROUTE_ISIS);
  ...
  return;
}
void
zclient_init (struct zclient *zclient, int redist_default)
{
  int i;

  /* Enable zebra client connection by default. */
  zclient->enable = 1;

  /* Set -1 to the default socket value. */
  zclient->sock = -1;
  ...
  zclient_event (ZCLIENT_SCHEDULE, zclient);
}
static void
zclient_event (enum event event, struct zclient *zclient)
{
  switch (event)
    {
    case ZCLIENT_SCHEDULE:
      if (!zclient->t_connect)
        zclient->t_connect =
          thread_add_event (zclient->master, zclient_connect, zclient, 0);
      break;
    ...
    }
}
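The scheduled callback itself is tiny. A hedged sketch along the lines of lib/zclient.c (the retry logic lives in zclient_start, which re-arms a connect timer when the connect fails):

/* Fired once by the thread scheduler; clears the pending-event handle
   and lets zclient_start do the actual connect. */
static int
zclient_connect (struct thread *t)
{
  struct zclient *zclient = THREAD_ARG (t);

  zclient->t_connect = NULL;
  return zclient_start (zclient);
}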
2.2 zclient_connect eventually calls zclient_socket (through zclient_socket_connect) to initialize the client socket:
int
zclient_socket_connect (struct zclient *zclient)
{
#ifdef HAVE_TCP_ZEBRA
  zclient->sock = zclient_socket ();
#else
  zclient->sock = zclient_socket_un (zclient_serv_path_get ());
#endif
  return zclient->sock;
}
static int
zclient_socket (void)
{
  int sock;
  int ret;
  struct sockaddr_in serv;

  /* We should think about IPv6 connection. */
  sock = socket (AF_INET, SOCK_STREAM, 0);
  if (sock < 0)
    return -1;

  /* Make server socket. */
  memset (&serv, 0, sizeof (struct sockaddr_in));
  serv.sin_family = AF_INET;
  serv.sin_port = htons (ZEBRA_PORT);
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
  serv.sin_len = sizeof (struct sockaddr_in);
#endif /* HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
  serv.sin_addr.s_addr = htonl (INADDR_LOOPBACK);

  /* Connect to zebra. */
  ret = connect (sock, (struct sockaddr *) &serv, sizeof (serv));
  if (ret < 0)
    {
      close (sock);
      return -1;
    }
  return sock;
}
And with that, server and client have a TCP connection. The other clients (bgpd, ospfd, and so on) all call zclient_socket to connect in exactly the same way; nice code reuse!
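When HAVE_TCP_ZEBRA is not defined, the connection goes over a Unix-domain socket instead. A hedged sketch of zclient_socket_un, reconstructed along the lines of lib/zclient.c:

static int
zclient_socket_un (const char *path)
{
  int ret, sock, len;
  struct sockaddr_un addr;

  sock = socket (AF_UNIX, SOCK_STREAM, 0);
  if (sock < 0)
    return -1;

  /* Build the filesystem address of the zebra server socket. */
  memset (&addr, 0, sizeof (struct sockaddr_un));
  addr.sun_family = AF_UNIX;
  strncpy (addr.sun_path, path, sizeof (addr.sun_path) - 1);
  len = sizeof (addr.sun_family) + strlen (addr.sun_path);

  ret = connect (sock, (struct sockaddr *) &addr, len);
  if (ret < 0)
    {
      close (sock);
      return -1;
    }
  return sock;
}

The choice between TCP and the Unix-domain path is purely a build-time decision; the zserv protocol spoken on top is identical.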
3. Now let's see how the grand steward handles zebra's day-to-day business:
First, a read event is registered with the thread scheduler (notice how the whole system is quietly kept running by the thread module):
thread_add_read (zebrad.master, zebra_client_read, client, sock);
/* Handler of zebra service request. */
static int
zebra_client_read (struct thread *thread)
{
  ...
  command = stream_getw (client->ibuf);
  ...
  switch (command)
    {
    ...
    case ZEBRA_IPV4_ROUTE_ADD:
      zread_ipv4_add (client, length, vrf_id);
      break;
    case ZEBRA_IPV4_ROUTE_DELETE:
      zread_ipv4_delete (client, length, vrf_id);
      break;
    ...
    default:
      zlog_info ("Zebra received unknown command %d", command);
      break;
    }
  ...
  zebra_event (ZEBRA_READ, sock, client);
  return 0;
}
As the code above shows, the command number is read out of the message and dispatched; for example ZEBRA_IPV4_ROUTE_ADD and ZEBRA_IPV4_ROUTE_DELETE add and delete an IPv4 route respectively. Afterwards the handler re-arms itself with zebra_event (ZEBRA_READ, ...) to wait for the next message.
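For completeness, here is what the sending side of such a message looks like. This is a hedged sketch built on the zapi_ipv4_route helper from lib/zclient.c; it assumes the global zclient set up in isis_zebra_init above, and the prefix, nexthop, and metric values are invented for illustration:

/* Illustrative fragment: announce 10.0.0.0/24 with one nexthop to zebra. */
static void
announce_example_route (void)
{
  struct zapi_ipv4 api;
  struct prefix_ipv4 p;
  struct in_addr nexthop;
  struct in_addr *nexthop_ptr = &nexthop;

  memset (&api, 0, sizeof (api));
  api.type = ZEBRA_ROUTE_ISIS;     /* which daemon this route comes from */
  api.safi = SAFI_UNICAST;
  SET_FLAG (api.message, ZAPI_MESSAGE_NEXTHOP);
  SET_FLAG (api.message, ZAPI_MESSAGE_METRIC);
  api.metric = 20;

  inet_pton (AF_INET, "192.168.1.1", &nexthop);
  api.nexthop_num = 1;
  api.nexthop = &nexthop_ptr;

  str2prefix_ipv4 ("10.0.0.0/24", &p);
  zapi_ipv4_route (ZEBRA_IPV4_ROUTE_ADD, zclient, &p, &api);
}

On the zebra side, zread_ipv4_add parses these same fields back out of the stream.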
4. Finally, how does the grand steward (zebra) deal with the emperor (the kernel)?
This path is initialized from main() by rib_init:
/* Routing information base initialize. */
void
rib_init (void)
{
  rib_queue_init (&zebrad);
}

/* fill in the work queue spec */
zebra->ribq->spec.workfunc = &meta_queue_process;
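That workfunc assignment is a fragment of rib_queue_init. A hedged sketch of the whole function; the exact spec defaults are from memory and may differ between versions:

/* Create the RIB work queue and the meta queue it drains. */
static void
rib_queue_init (struct zebra_t *zebra)
{
  zebra->ribq = work_queue_new (zebra->master, "route_node processing");

  /* fill in the work queue spec */
  zebra->ribq->spec.workfunc = &meta_queue_process;
  zebra->ribq->spec.errorfunc = NULL;
  zebra->ribq->spec.max_retries = 3;
  zebra->ribq->spec.hold = 10;   /* hold time to coalesce bursts (value from memory) */

  zebra->mq = meta_queue_new ();
}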
The code above creates a work queue that the thread scheduler runs as a low-priority background task (THREAD_BACKGROUND). Its work function, meta_queue_process, services the sub-queues:
/* Dispatch the meta queue by picking, processing and unlocking the next RN
 * from a non-empty sub-queue with lowest priority. wq is equal to
 * zebra->ribq and data is pointed to the meta queue structure.
 */
static wq_item_status
meta_queue_process (struct work_queue *dummy, void *data)
{
  struct meta_queue *mq = data;
  unsigned i;

  for (i = 0; i < MQ_SIZE; i++)
    if (process_subq (mq->subq[i], i))
      {
        mq->size--;
        break;
      }
  return mq->size ? WQ_REQUEUE : WQ_SUCCESS;
}
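process_subq is where rib_process finally runs. A hedged sketch of its core, with the queued-flag bookkeeping trimmed:

/* Take one route_node off the sub-queue, process it, and drop the
   reference the queue held on it. Returns 1 if work was done. */
static unsigned int
process_subq (struct list *subq, u_char qindex)
{
  struct listnode *lnode = listhead (subq);
  struct route_node *rnode;

  if (!lnode)
    return 0;

  rnode = listgetdata (lnode);
  rib_process (rnode);          /* compare old/new RIBs, install the best */

  list_delete_node (subq, lnode);
  route_unlock_node (rnode);    /* release the queue's reference */
  return 1;
}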
As sketched above, process_subq hands each route_node to rib_process, which is where routes are actually processed. The old-versus-new comparison and the data structures behind updating kernel routes are a big topic (embarrassing, the layout of this article turned out poorly), so they get their own article later; here we just glance at the end of the flow:
int
kernel_route_rib (struct prefix *p, struct rib *old, struct rib *new)
{
  int route = 0;

  if (zserv_privs.change (ZPRIVS_RAISE))
    zlog (NULL, LOG_ERR, "Can't raise privileges");

  if (old)
    route |= kernel_rtm (RTM_DELETE, p, old);

  if (new)
    route |= kernel_rtm (RTM_ADD, p, new);

  if (zserv_privs.change (ZPRIVS_LOWER))
    zlog (NULL, LOG_ERR, "Can't lower privileges");

  return route;
}
This is the last hop: zebra pushes the result into the kernel. Note that the kernel_route_rib shown above is the BSD flavor from rt_socket.c, which drives the routing socket via kernel_rtm; on Linux, the same hook is implemented in rt_netlink.c, where netlink messages update the kernel routing table.
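To make that last hop concrete, here is a standalone, hedged illustration (not quagga code) of what a minimal netlink RTM_NEWROUTE request looks like on Linux; netlink_route_multipath() in rt_netlink.c builds messages of this general shape, with far more bookkeeping. The prefix and nexthop values are invented:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Append one attribute to a netlink message (classic iproute2 pattern). */
static void
addattr (struct nlmsghdr *nh, int type, const void *data, int len)
{
  struct rtattr *rta = (struct rtattr *) ((char *) nh + NLMSG_ALIGN (nh->nlmsg_len));

  rta->rta_type = type;
  rta->rta_len = RTA_LENGTH (len);
  memcpy (RTA_DATA (rta), data, len);
  nh->nlmsg_len = NLMSG_ALIGN (nh->nlmsg_len) + RTA_ALIGN (rta->rta_len);
}

int
main (void)
{
  /* Request buffer: netlink header, route message, room for attributes. */
  struct
  {
    struct nlmsghdr nh;
    struct rtmsg rt;
    char attrbuf[128];
  } req;
  struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
  struct in_addr dst, gw;
  int fd;

  fd = socket (AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
  if (fd < 0)
    {
      perror ("socket");
      return 1;
    }

  inet_pton (AF_INET, "10.0.0.0", &dst);    /* invented prefix  */
  inet_pton (AF_INET, "192.168.1.1", &gw);  /* invented nexthop */

  memset (&req, 0, sizeof (req));
  req.nh.nlmsg_len = NLMSG_LENGTH (sizeof (struct rtmsg));
  req.nh.nlmsg_type = RTM_NEWROUTE;
  req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
  req.rt.rtm_family = AF_INET;
  req.rt.rtm_dst_len = 24;                  /* /24 prefix length */
  req.rt.rtm_table = RT_TABLE_MAIN;
  req.rt.rtm_protocol = RTPROT_STATIC;
  req.rt.rtm_scope = RT_SCOPE_UNIVERSE;
  req.rt.rtm_type = RTN_UNICAST;

  addattr (&req.nh, RTA_DST, &dst, sizeof (dst));
  addattr (&req.nh, RTA_GATEWAY, &gw, sizeof (gw));

  /* Needs CAP_NET_ADMIN to actually install the route. */
  if (sendto (fd, &req, req.nh.nlmsg_len, 0,
              (struct sockaddr *) &kernel, sizeof (kernel)) < 0)
    perror ("sendto");

  close (fd);
  return 0;
}

And that, end to end, is the grand steward's day job: collect routes from the protocol daemons over zserv, pick the winners in the RIB, and deliver them to the emperor.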