Optimizing bind IPs for nginx origin-pull connections

Introduction

proxy_bind belongs to the proxy module and sets the local IP used when nginx establishes a connection to the backend. The stock nginx source only supports binding a single IP for origin-pull; to pull from the origin through multiple IPs, the source can be modified to support an array of bind IPs, which is exactly what I did in production: the bind IP array is walked round-robin, each new upstream connection using the next IP. This works around the connection limit of a single source IP (one local IP can only hold as many concurrent connections to a given upstream address as the ephemeral port range allows, so binding N IPs raises that ceiling N-fold). The proxy_bind section below shows the code after this optimization, with support for binding multiple IPs.

check_bind, by contrast, sets the source IP used for health checks against the origin: when probing an upstream, the connection is made from this source IP and health is judged from the response. This separates the IP group used for health checks from the IP group used for real origin-pull traffic. The check_bind directive is not a built-in nginx feature; it requires patching nginx.

proxy_bind

Directive definition in the stock nginx source:

{ ngx_string("proxy_bind"),
  NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE12,
  ngx_http_upstream_bind_set_slot,
  NGX_HTTP_LOC_CONF_OFFSET,
  offsetof(ngx_http_proxy_loc_conf_t, upstream.local),
  NULL },

Directive definition after the change:

{ ngx_string("proxy_bind"),
  NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_1MORE,
  ngx_http_upstream_bind_set_slot_array,
  NGX_HTTP_LOC_CONF_OFFSET,
  offsetof(ngx_http_proxy_loc_conf_t, upstream.local_array),
  NULL },
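With NGX_CONF_TAKE12 replaced by NGX_CONF_1MORE, the directive accepts one or more arguments, so a configuration such as proxy_bind 192.0.2.10 192.0.2.11 192.0.2.12; (addresses purely illustrative) becomes valid, handled by the new array-aware ngx_http_upstream_bind_set_slot_array instead of the stock ngx_http_upstream_bind_set_slot.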

Below is the optimized parsing code:

char *ngx_http_upstream_bind_set_slot_array(ngx_conf_t *cf, ngx_command_t *cmd,
    void *conf)
{
    char  *p = conf;

    ngx_int_t                          rc;
    ngx_str_t                         *value;
    ngx_uint_t                         i;
    ngx_addr_t                        *addr;
    ngx_http_upstream_local_array_t  **plocal, *local;

    plocal = (ngx_http_upstream_local_array_t **) (p + cmd->offset);

    if (*plocal != NGX_CONF_UNSET_PTR) {
        return "bind is duplicate";
    }

    value = cf->args->elts;

    // create the local array
    local = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_local_array_t));
    if (local == NULL) {
        return NGX_CONF_ERROR;
    }

    *plocal = local;

    // create the local peer addrs
    local->addr = ngx_pcalloc(cf->pool, sizeof(ngx_peer_addrs_t));
    if (local->addr == NULL) {
        return NGX_CONF_ERROR;
    }

    // create the addr array
    local->addr->addrs = ngx_array_create(cf->pool, 1, sizeof(ngx_addr_t));
    if (local->addr->addrs == NULL) {
        return NGX_CONF_ERROR;
    }

    // walk all local IPs (value[0] is the directive name) and push
    // them into the array
    for (i = 1; i < cf->args->nelts; i++) {

        addr = ngx_array_push(local->addr->addrs);
        if (addr == NULL) {
            return NGX_CONF_ERROR;
        }

        rc = ngx_parse_addr(cf->pool, addr, value[i].data, value[i].len);

        switch (rc) {
        case NGX_OK:
            addr->name = value[i];
            break;

        case NGX_DECLINED:
            ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                               "invalid address \"%V\"", &value[i]);
            /* fall through */

        default:
            return NGX_CONF_ERROR;
        }
    }

    return NGX_CONF_OK;
}

Afterwards, in ngx_http_upstream_init_request(), u->peer.local_array is assigned from u->conf->local_array (i.e. the value just set by the directive):

static void ngx_http_upstream_init_request(ngx_http_request_t *r)
{
    ...

    u = r->upstream;

    u->peer.local_array =
        ngx_http_upstream_get_local_array(r, u->conf->local_array);

    ...
}
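The article does not show ngx_http_upstream_get_local_array(). Given that the check path later hands global_local->addr straight to pc.local_array, a minimal sketch could be a plain pass-through of the parsed address array; the body below is an assumption, not the author's code:

static ngx_peer_addrs_t *
ngx_http_upstream_get_local_array(ngx_http_request_t *r,
    ngx_http_upstream_local_array_t *local_array)
{
    // sketch: nothing configured, leave the peer unbound
    if (local_array == NULL || local_array == NGX_CONF_UNSET_PTR) {
        return NULL;
    }

    // hand the parsed ngx_addr_t array to the peer connection
    return local_array->addr;
}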

And where does u->conf come from? It is assigned by the content handler of the proxying module in use; for example proxy_pass, which we use all the time in our Tengine configurations, wires it up like this:

static ngx_int_t ngx_http_proxy_handler(ngx_http_request_t *r)
{
    ...

    u = r->upstream;

    // fetch the proxy module's loc conf
    plcf = ngx_http_get_module_loc_conf(r, ngx_http_proxy_module);

    // point the upstream at the corresponding conf
    u->conf = &plcf->upstream;

    ...
}

plcf->upstream.upstream is the real upstream srv conf; it is obtained in ngx_http_proxy_pass():

static char *ngx_http_proxy_pass(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
    ...

    plcf->upstream.upstream = ngx_http_upstream_add(cf, &u, 0);

    ...
}

Adding the check_bind module

check_bind adds a global_local field to the health-check module's main conf:

{ ngx_string("check_bind"),
  NGX_HTTP_MAIN_CONF|NGX_CONF_1MORE,
  ngx_http_upstream_bind_set_slot_array,
  NGX_HTTP_MAIN_CONF_OFFSET,
  offsetof(ngx_http_upstream_check_main_conf_t, global_local),
  NULL },

typedef struct {
    ngx_uint_t                          check_shm_size;
    ngx_http_upstream_check_peers_t    *peers;
    ngx_http_upstream_local_array_t    *global_local;
} ngx_http_upstream_check_main_conf_t;
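Since the directive is registered with NGX_HTTP_MAIN_CONF only, check_bind is declared once at http{} level, e.g. check_bind 192.0.2.20 192.0.2.21; (addresses purely illustrative), and the value then has to be pushed down into every upstream.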

The parsing that follows is the same as for proxy_bind; the same slot function is reused:

char * ngx_http_upstream_bind_set_slot_array(ngx_conf_t *cf, ngx_command_t *cmd,
    void *conf)
{
    // here conf is the upstream check main conf; parsing proceeds
    // exactly as shown above
    ...
}

Once global_local is stored in ucmcf, the next step is to place this global value into every upstream, i.e. every uscf. This is done in the upstream check module's init_main_conf callback:

static char * ngx_http_upstream_check_init_main_conf(ngx_conf_t *cf, void *conf)
{
    ...

    // fetch the upstream module's main conf
    umcf = ngx_http_conf_get_module_main_conf(cf, ngx_http_upstream_module);

    // pointer to the array of upstream srv confs
    uscfp = umcf->upstreams.elts;

    for (i = 0; i < umcf->upstreams.nelts; i++) {

        // propagate the global value to each upstream in turn
        if (ngx_http_upstream_check_init_srv_conf(cf, uscfp[i],
                                                  ucmcf->global_local)
            != NGX_OK)
        {
            return NGX_CONF_ERROR;
        }
    }

    ...
}

static char * ngx_http_upstream_check_init_srv_conf(ngx_conf_t *cf, void *conf,
    ngx_http_upstream_local_array_t *global_local)
{
    ngx_http_upstream_srv_conf_t  *us = conf;

    ...

    // fetch the check module's conf under this upstream srv conf
    ucscf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_check_module);

    // assign the global value, but only if nothing was set at the
    // upstream level, so a per-upstream setting takes precedence
    if (ucscf->global_local == NGX_CONF_UNSET_PTR) {
        if (global_local != NGX_CONF_UNSET_PTR && global_local != NULL) {
            ucscf->global_local = global_local;
        }
    }

    ...
}

For dyups compatibility (dyups adds upstreams at runtime, after init_main_conf has already run), the assignment must also be guaranteed inside add_peer, where the peer itself is added:

ngx_uint_t ngx_http_upstream_check_add_peer(ngx_conf_t *cf,
    ngx_http_upstream_srv_conf_t *us, ngx_addr_t *peer_addr
#ifdef CONFIG_NGX_NS
    , ngx_int_t vni, ngx_addr_t *hp_addr, ngx_addr_t *hp2_addr, __u8 *mac
#endif
    )
{
    ...

    // add check_bind support for dyups modules
    if (ucscf->global_local == NGX_CONF_UNSET_PTR) {
        if (ucmcf->global_local != NGX_CONF_UNSET_PTR
            && ucmcf->global_local != NULL)
        {
            ucscf->global_local = ucmcf->global_local;
        }
    }

    // add the peer
    peers = ucmcf->peers;

    peer = ngx_array_push(&peers->peers);

    peer->index = peers->peers.nelts - 1;

    // the crucial part: the ucscf whose global_local was just filled
    // in above becomes peer->conf
    peer->conf = ucscf;
    peer->upstream_name = &us->host;
    peer->peer_addr = peer_addr;

    ...
}

In add_timer all the check handlers are wired up, with the event's data set to the peer created above:

static ngx_int_t ngx_http_upstream_check_add_timer(ngx_http_upstream_check_peer_t *peer,
    ngx_check_conf_t *check_conf, ngx_msec_t timer, ngx_log_t *log)
{
    ...

    peer->check_ev.handler = ngx_http_upstream_check_begin_handler;
    peer->check_ev.log = log;
    peer->check_ev.data = peer;

    ...
}

ngx_http_upstream_check_connect_handler is the handler called when an upstream check fires:

static void ngx_http_upstream_check_begin_handler(ngx_event_t *event)
{
    ...

    // only the worker that owns this peer in shared memory runs the check
    if (peer->shm->owner == ngx_pid) {
        ngx_http_upstream_check_connect_handler(event);
    }

    ...
}

static void ngx_http_upstream_check_connect_handler(ngx_event_t *event)
{
    ...

    peer = event->data;

    // the peer's conf is the ucscf
    ucscf = peer->conf;

    // hand the check_bind address array to the peer connection
    if (peer->conf->global_local != NGX_CONF_UNSET_PTR
        && peer->conf->global_local != NULL)
    {
        peer->pc.local_array = peer->conf->global_local->addr;

    } else {
        peer->pc.local_array = NULL;
    }

    rc = ngx_event_connect_peer(&peer->pc);

    ...
}
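Neither snippet shows how ngx_event_connect_peer() actually consumes pc->local_array. A minimal sketch of the round-robin selection described in the introduction might look like the following; the helper name and the static counter are assumptions, and a real implementation could keep the cursor per worker or inside the array itself:

static ngx_addr_t *
ngx_event_get_local_addr(ngx_peer_connection_t *pc)
{
    static ngx_uint_t   next;    // hypothetical round-robin cursor
    ngx_addr_t         *addrs;

    if (pc->local_array == NULL || pc->local_array->addrs->nelts == 0) {
        return pc->local;        // fall back to the single bind address
    }

    addrs = pc->local_array->addrs->elts;

    // each new connection takes the next local IP in turn
    return &addrs[next++ % pc->local_array->addrs->nelts];
}

ngx_event_connect_peer() would then bind() the returned address before connect(), exactly as stock nginx does with pc->local.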
