vislee opened this issue 7 years ago
## Overview

The proxy module implements reverse proxying on top of the upstream (ups) mechanism, and it is a very complex module. This post does not discuss the upstream mechanism itself except where the code forces us to go deep into it; everything else is only touched on in passing. See the previous post for the upstream mechanism.
The proxy module controls proxy caching through the proxy_cache[_xxx] family of directives.
proxy_hide_header field; hides the given upstream response header, so it is not passed on to the client. proxy_ignore_headers field ...; disables the effect of certain upstream response headers, typically the X-Accel-* headers.
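For illustration, a minimal (hypothetical) use of these two directives; the upstream name `backend` is made up:

```nginx
location / {
    proxy_pass           http://backend;
    proxy_hide_header    X-Powered-By;                    # do not pass this upstream header to the client
    proxy_ignore_headers X-Accel-Expires Cache-Control;   # do not let these headers influence proxying/caching
}
```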
The cache-related directives (a minimal example follows the list):

- proxy_cache_path — configures the on-disk path for cache files and the shared memory zone that holds the cache metadata.
- proxy_cache_valid — configures which response status codes are cached and for how long.
- proxy_cache — enables or disables caching and names the cache to use, i.e. the path/metadata zone declared by proxy_cache_path; the value may contain variables.
- proxy_cache_convert_head — whether HEAD requests are converted to GET for caching.
- proxy_cache_bypass — conditions under which an existing cached response is bypassed (not served from cache).
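A minimal configuration putting these directives together might look like this; the paths, zone name and upstream are hypothetical:

```nginx
proxy_cache_path /data/nginx/cache levels=1:2 keys_zone=one:10m
                 max_size=1g inactive=10m use_temp_path=off;

server {
    location / {
        proxy_pass               http://backend;
        proxy_cache              one;
        proxy_cache_key          $scheme$proxy_host$request_uri;
        proxy_cache_valid        200 302 10m;
        proxy_cache_valid        404 1m;
        proxy_cache_convert_head on;
        proxy_cache_bypass       $http_x_no_cache;
    }
}
```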
## Code

```c
/* handler for the "proxy_cache" directive */
static char *
ngx_http_proxy_cache(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
    ngx_http_proxy_loc_conf_t *plcf = conf;

    ngx_str_t                         *value;
    ngx_http_complex_value_t           cv;
    ngx_http_compile_complex_value_t   ccv;

    value = cf->args->elts;

    if (plcf->upstream.cache != NGX_CONF_UNSET) {
        return "is duplicate";
    }

    if (ngx_strcmp(value[1].data, "off") == 0) {
        plcf->upstream.cache = 0;
        return NGX_CONF_OK;
    }

    if (plcf->upstream.store > 0) {
        return "is incompatible with \"proxy_store\"";
    }

    plcf->upstream.cache = 1;

    ngx_memzero(&ccv, sizeof(ngx_http_compile_complex_value_t));

    ccv.cf = cf;
    ccv.value = &value[1];
    ccv.complex_value = &cv;

    if (ngx_http_compile_complex_value(&ccv) != NGX_OK) {
        return NGX_CONF_ERROR;
    }

    if (cv.lengths != NULL) {

        /* the zone argument contains variables */
        plcf->upstream.cache_value = ngx_palloc(cf->pool,
                                                sizeof(ngx_http_complex_value_t));
        if (plcf->upstream.cache_value == NULL) {
            return NGX_CONF_ERROR;
        }

        *plcf->upstream.cache_value = cv;

        return NGX_CONF_OK;
    }

    /* the zone argument is a plain name: look up the shared memory zone */
    plcf->upstream.cache_zone = ngx_shared_memory_add(cf, &value[1], 0,
                                                      &ngx_http_proxy_module);
    if (plcf->upstream.cache_zone == NULL) {
        return NGX_CONF_ERROR;
    }

    return NGX_CONF_OK;
}
```
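Because the argument is compiled as a complex value, proxy_cache can take a variable and the zone is then resolved per request (the cv.lengths branch above). A hypothetical sketch; the map, zone name and paths are made up:

```nginx
# Selecting a cache zone at run time, or "off" to disable caching.
map $request_uri $cache_zone {
    ~^/static/  one;
    default     off;
}

proxy_cache_path /data/nginx/cache levels=1:2 keys_zone=one:10m;

server {
    location / {
        proxy_pass  http://backend;
        proxy_cache $cache_zone;
    }
}
```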
```c
struct ngx_http_file_cache_s {
    ngx_http_file_cache_sh_t  *sh;
    ngx_slab_pool_t           *shpool;

    ngx_path_t                *path;

    off_t                      max_size;
    size_t                     bsize;

    time_t                     inactive;

    time_t                     fail_time;

    ngx_uint_t                 files;
    ngx_uint_t                 loader_files;
    ngx_msec_t                 last;
    ngx_msec_t                 loader_sleep;
    ngx_msec_t                 loader_threshold;

    ngx_uint_t                 manager_files;
    ngx_msec_t                 manager_sleep;
    ngx_msec_t                 manager_threshold;

    ngx_shm_zone_t            *shm_zone;

    ngx_uint_t                 use_temp_path;
                               /* unsigned use_temp_path:1 */
};


/* allocated by ngx_http_file_cache_new() and assigned to r->cache */
struct ngx_http_cache_s {
    ngx_file_t                       file;
    ngx_array_t                      keys;      /* the unique cache index, from the proxy_cache_key directive */
    uint32_t                         crc32;     /* crc32 of the key */
    u_char                           key[NGX_HTTP_CACHE_KEY_LEN];
    u_char                           main[NGX_HTTP_CACHE_KEY_LEN];   /* md5 of the key */

    ngx_file_uniq_t                  uniq;
    time_t                           valid_sec;
    time_t                           updating_sec;
    time_t                           error_sec;
    time_t                           last_modified;
    time_t                           date;

    ngx_str_t                        etag;
    ngx_str_t                        vary;
    u_char                           variant[NGX_HTTP_CACHE_KEY_LEN];

    size_t                           header_start;
    size_t                           body_start;
    off_t                            length;
    off_t                            fs_size;

    ngx_uint_t                       min_uses;
    ngx_uint_t                       error;
    ngx_uint_t                       valid_msec;
    ngx_uint_t                       vary_tag;

    ngx_buf_t                       *buf;

    ngx_http_file_cache_t           *file_cache;
    ngx_http_file_cache_node_t      *node;

#if (NGX_THREADS || NGX_COMPAT)
    ngx_thread_task_t               *thread_task;
#endif

    ngx_msec_t                       lock_timeout;
    ngx_msec_t                       lock_age;
    ngx_msec_t                       lock_time;
    ngx_msec_t                       wait_time;

    ngx_event_t                      wait_event;

    unsigned                         lock:1;
    unsigned                         waiting:1;

    unsigned                         updated:1;
    unsigned                         updating:1;
    unsigned                         exists:1;
    unsigned                         temp_file:1;
    unsigned                         purged:1;
    unsigned                         reading:1;
    unsigned                         secondary:1;
    unsigned                         background:1;

    unsigned                         stale_updating:1;
    unsigned                         stale_error:1;
};


/*
 * Handler for the proxy_cache_path directive.
 * It configures a cache directory and sets up a shared memory zone; the
 * shared memory acts as an index of the files in that directory.
 * The cache directory is an ordinary directory, so it is also added to
 * cycle->paths.
 * Because the cache directory carries extra caching policy and index data,
 * it is additionally stored in ngx_http_proxy_main_conf_t->caches.
 * The proxy_cache directive may later reference this cache directory via
 * the shared memory zone.
 */
char *
ngx_http_file_cache_set_slot(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
    char  *confp = conf;

    off_t                   max_size;
    u_char                 *last, *p;
    time_t                  inactive;
    ssize_t                 size;
    ngx_str_t               s, name, *value;
    ngx_int_t               loader_files, manager_files;
    ngx_msec_t              loader_sleep, manager_sleep, loader_threshold,
                            manager_threshold;
    ngx_uint_t              i, n, use_temp_path;
    ngx_array_t            *caches;
    ngx_http_file_cache_t  *cache, **ce;

    cache = ngx_pcalloc(cf->pool, sizeof(ngx_http_file_cache_t));
    if (cache == NULL) {
        return NGX_CONF_ERROR;
    }

    cache->path = ngx_pcalloc(cf->pool, sizeof(ngx_path_t));
    if (cache->path == NULL) {
        return NGX_CONF_ERROR;
    }

    use_temp_path = 1;

    inactive = 600;

    loader_files = 100;
    loader_sleep = 50;
    loader_threshold = 200;

    manager_files = 100;
    manager_sleep = 50;
    manager_threshold = 200;

    name.len = 0;
    size = 0;
    max_size = NGX_MAX_OFF_T_VALUE;

    value = cf->args->elts;

    cache->path->name = value[1];

    if (cache->path->name.data[cache->path->name.len - 1] == '/') {
        cache->path->name.len--;
    }

    if (ngx_conf_full_name(cf->cycle, &cache->path->name, 0) != NGX_OK) {
        return NGX_CONF_ERROR;
    }

    for (i = 2; i < cf->args->nelts; i++) {

        if (ngx_strncmp(value[i].data, "levels=", 7) == 0) {

            p = value[i].data + 7;
            last = value[i].data + value[i].len;

            for (n = 0; n < NGX_MAX_PATH_LEVEL && p < last; n++) {

                if (*p > '0' && *p < '3') {

                    /*
                     * level[3]: at most 3 levels of subdirectories, each
                     * level 1 or 2 characters long, e.g.
                     *     proxy_cache_path /data/nginx/cache levels=1:2
                     * stores cache files under directories such as
                     *     /data/nginx/cache/c/29/
                     */
                    cache->path->level[n] = *p++ - '0';
                    cache->path->len += cache->path->level[n] + 1;

                    if (p == last) {
                        break;
                    }

                    if (*p++ == ':' && n < NGX_MAX_PATH_LEVEL - 1 && p < last) {
                        continue;
                    }

                    goto invalid_levels;
                }

                goto invalid_levels;
            }

            /* at most NGX_MAX_PATH_LEVEL levels; each level plus its separator
             * is at most 3 characters long */
            if (cache->path->len < 10 + NGX_MAX_PATH_LEVEL) {
                continue;
            }

        invalid_levels:

            ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                               "invalid \"levels\" \"%V\"", &value[i]);
            return NGX_CONF_ERROR;
        }

        if (ngx_strncmp(value[i].data, "use_temp_path=", 14) == 0) {

            if (ngx_strcmp(&value[i].data[14], "on") == 0) {
                use_temp_path = 1;

            } else if (ngx_strcmp(&value[i].data[14], "off") == 0) {
                use_temp_path = 0;

            } else {
                ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                                   "invalid use_temp_path value \"%V\", "
                                   "it must be \"on\" or \"off\"",
                                   &value[i]);
                return NGX_CONF_ERROR;
            }

            continue;
        }

        if (ngx_strncmp(value[i].data, "keys_zone=", 10) == 0) {

            name.data = value[i].data + 10;

            p = (u_char *) ngx_strchr(name.data, ':');

            if (p) {
                name.len = p - name.data;

                p++;

                s.len = value[i].data + value[i].len - p;
                s.data = p;

                size = ngx_parse_size(&s);

                if (size > 8191) {
                    continue;
                }
            }

            ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                               "invalid keys zone size \"%V\"", &value[i]);
            return NGX_CONF_ERROR;
        }

        if (ngx_strncmp(value[i].data, "inactive=", 9) == 0) {

            s.len = value[i].len - 9;
            s.data = value[i].data + 9;

            inactive = ngx_parse_time(&s, 1);
            if (inactive == (time_t) NGX_ERROR) {
                ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                                   "invalid inactive value \"%V\"", &value[i]);
                return NGX_CONF_ERROR;
            }

            continue;
        }

        if (ngx_strncmp(value[i].data, "max_size=", 9) == 0) {

            s.len = value[i].len - 9;
            s.data = value[i].data + 9;

            max_size = ngx_parse_offset(&s);
            if (max_size < 0) {
                ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                                   "invalid max_size value \"%V\"", &value[i]);
                return NGX_CONF_ERROR;
            }

            continue;
        }

        if (ngx_strncmp(value[i].data, "loader_files=", 13) == 0) {

            loader_files = ngx_atoi(value[i].data + 13, value[i].len - 13);
            if (loader_files == NGX_ERROR) {
                ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                                   "invalid loader_files value \"%V\"",
                                   &value[i]);
                return NGX_CONF_ERROR;
            }

            continue;
        }

        if (ngx_strncmp(value[i].data, "loader_sleep=", 13) == 0) {

            s.len = value[i].len - 13;
            s.data = value[i].data + 13;

            loader_sleep = ngx_parse_time(&s, 0);
            if (loader_sleep == (ngx_msec_t) NGX_ERROR) {
                ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                                   "invalid loader_sleep value \"%V\"",
                                   &value[i]);
                return NGX_CONF_ERROR;
            }

            continue;
        }

        if (ngx_strncmp(value[i].data, "loader_threshold=", 17) == 0) {

            s.len = value[i].len - 17;
            s.data = value[i].data + 17;

            loader_threshold = ngx_parse_time(&s, 0);
            if (loader_threshold == (ngx_msec_t) NGX_ERROR) {
                ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                                   "invalid loader_threshold value \"%V\"",
                                   &value[i]);
                return NGX_CONF_ERROR;
            }

            continue;
        }

        if (ngx_strncmp(value[i].data, "manager_files=", 14) == 0) {

            manager_files = ngx_atoi(value[i].data + 14, value[i].len - 14);
            if (manager_files == NGX_ERROR) {
                ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                                   "invalid manager_files value \"%V\"",
                                   &value[i]);
                return NGX_CONF_ERROR;
            }

            continue;
        }

        if (ngx_strncmp(value[i].data, "manager_sleep=", 14) == 0) {

            s.len = value[i].len - 14;
            s.data = value[i].data + 14;

            manager_sleep = ngx_parse_time(&s, 0);
            if (manager_sleep == (ngx_msec_t) NGX_ERROR) {
                ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                                   "invalid manager_sleep value \"%V\"",
                                   &value[i]);
                return NGX_CONF_ERROR;
            }

            continue;
        }

        if (ngx_strncmp(value[i].data, "manager_threshold=", 18) == 0) {

            s.len = value[i].len - 18;
            s.data = value[i].data + 18;

            manager_threshold = ngx_parse_time(&s, 0);
            if (manager_threshold == (ngx_msec_t) NGX_ERROR) {
                ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                                   "invalid manager_threshold value \"%V\"",
                                   &value[i]);
                return NGX_CONF_ERROR;
            }

            continue;
        }

        ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                           "invalid parameter \"%V\"", &value[i]);
        return NGX_CONF_ERROR;
    }

    if (name.len == 0 || size == 0) {
        ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                           "\"%V\" must have \"keys_zone\" parameter",
                           &cmd->name);
        return NGX_CONF_ERROR;
    }

    cache->path->manager = ngx_http_file_cache_manager;
    cache->path->loader = ngx_http_file_cache_loader;
    cache->path->data = cache;
    cache->path->conf_file = cf->conf_file->file.name.data;
    cache->path->line = cf->conf_file->line;

    cache->loader_files = loader_files;
    cache->loader_sleep = loader_sleep;
    cache->loader_threshold = loader_threshold;

    cache->manager_files = manager_files;
    cache->manager_sleep = manager_sleep;
    cache->manager_threshold = manager_threshold;

    /* add the path to cycle->paths so the nginx core manages it */
    if (ngx_add_path(cf, &cache->path) != NGX_OK) {
        return NGX_CONF_ERROR;
    }

    cache->shm_zone = ngx_shared_memory_add(cf, &name, size, cmd->post);
    if (cache->shm_zone == NULL) {
        return NGX_CONF_ERROR;
    }

    if (cache->shm_zone->data) {
        ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                           "duplicate zone \"%V\"", &name);
        return NGX_CONF_ERROR;
    }

    cache->shm_zone->init = ngx_http_file_cache_init;
    cache->shm_zone->data = cache;

    cache->use_temp_path = use_temp_path;

    cache->inactive = inactive;
    cache->max_size = max_size;

    caches = (ngx_array_t *) (confp + cmd->offset);

    ce = ngx_array_push(caches);
    if (ce == NULL) {
        return NGX_CONF_ERROR;
    }

    *ce = cache;

    return NGX_CONF_OK;
}
```
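The parameters parsed above map directly onto proxy_cache_path arguments. A hypothetical example (path, zone name and values are made up): with `levels=1:2`, a response whose key hashes to the md5 `b7f54b2df7773722d382f4809d65029c` ends up in a file like `/data/nginx/cache/c/29/b7f54b2df7773722d382f4809d65029c`, matching the `/c/29/` layout mentioned in the comments above.

```nginx
proxy_cache_path /data/nginx/cache levels=1:2 keys_zone=one:10m
                 inactive=10m max_size=1g use_temp_path=off
                 loader_files=200 loader_sleep=50ms loader_threshold=300ms
                 manager_files=100 manager_sleep=50ms manager_threshold=200ms;
```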
```c
static char *
ngx_http_proxy_pass(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
    ngx_http_proxy_loc_conf_t *plcf = conf;

    size_t                      add;
    u_short                     port;
    ngx_str_t                  *value, *url;
    ngx_url_t                   u;
    ngx_uint_t                  n;
    ngx_http_core_loc_conf_t   *clcf;
    ngx_http_script_compile_t   sc;

    if (plcf->upstream.upstream || plcf->proxy_lengths) {
        return "is duplicate";
    }

    clcf = ngx_http_conf_get_module_loc_conf(cf, ngx_http_core_module);

    clcf->handler = ngx_http_proxy_handler;

    if (clcf->name.data[clcf->name.len - 1] == '/') {
        clcf->auto_redirect = 1;
    }

    value = cf->args->elts;

    url = &value[1];

    n = ngx_http_script_variables_count(url);

    if (n) {

        ngx_memzero(&sc, sizeof(ngx_http_script_compile_t));

        sc.cf = cf;
        sc.source = url;
        sc.lengths = &plcf->proxy_lengths;
        sc.values = &plcf->proxy_values;
        sc.variables = n;
        sc.complete_lengths = 1;
        sc.complete_values = 1;

        if (ngx_http_script_compile(&sc) != NGX_OK) {
            return NGX_CONF_ERROR;
        }

#if (NGX_HTTP_SSL)
        plcf->ssl = 1;
#endif

        return NGX_CONF_OK;
    }

    if (ngx_strncasecmp(url->data, (u_char *) "http://", 7) == 0) {
        add = 7;
        port = 80;

    } else if (ngx_strncasecmp(url->data, (u_char *) "https://", 8) == 0) {

#if (NGX_HTTP_SSL)
        plcf->ssl = 1;

        add = 8;
        port = 443;
#else
        ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                           "https protocol requires SSL support");
        return NGX_CONF_ERROR;
#endif

    } else {
        ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "invalid URL prefix");
        return NGX_CONF_ERROR;
    }

    ngx_memzero(&u, sizeof(ngx_url_t));

    u.url.len = url->len - add;
    u.url.data = url->data + add;
    u.default_port = port;
    u.uri_part = 1;
    u.no_resolve = 1;

    plcf->upstream.upstream = ngx_http_upstream_add(cf, &u, 0);
    if (plcf->upstream.upstream == NULL) {
        return NGX_CONF_ERROR;
    }

    plcf->vars.schema.len = add;
    plcf->vars.schema.data = url->data;
    plcf->vars.key_start = plcf->vars.schema;

    ngx_http_proxy_set_vars(&u, &plcf->vars);

    plcf->location = clcf->name;

    if (clcf->named
#if (NGX_PCRE)
        || clcf->regex
#endif
        || clcf->noname)
    {
        if (plcf->vars.uri.len) {
            ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                               "\"proxy_pass\" cannot have URI part in "
                               "location given by regular expression, "
                               "or inside named location, "
                               "or inside \"if\" statement, "
                               "or inside \"limit_except\" block");
            return NGX_CONF_ERROR;
        }

        plcf->location.len = 0;
    }

    plcf->url = *url;

    return NGX_CONF_OK;
}
```
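For reference, the two proxy_pass forms this handler distinguishes — a literal URL registered as an upstream at configure time versus a URL containing variables compiled into proxy_lengths/proxy_values and evaluated per request — look roughly like this (addresses and the $backend_host variable are hypothetical):

```nginx
location /static/ {
    # literal URL: the upstream is added at configuration time
    proxy_pass http://127.0.0.1:8080/files/;
}

location /dynamic/ {
    # URL with variables: resolved at request time (a resolver or a matching
    # upstream block is needed for the evaluated host name)
    proxy_pass http://$backend_host$request_uri;
}
```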
When a request is processed, the ngx_http_proxy_handler function installed above is invoked as the content handler.
```c
struct ngx_http_upstream_s {
    ngx_http_upstream_handler_pt     read_event_handler;
    ngx_http_upstream_handler_pt     write_event_handler;

    ngx_peer_connection_t            peer;

    ngx_event_pipe_t                *pipe;

    ngx_chain_t                     *request_bufs;

    ngx_output_chain_ctx_t           output;
    ngx_chain_writer_ctx_t           writer;

    ngx_http_upstream_conf_t        *conf;
    ngx_http_upstream_srv_conf_t    *upstream;
#if (NGX_HTTP_CACHE)
    ngx_array_t                     *caches;
#endif

    ngx_http_upstream_headers_in_t   headers_in;

    ngx_http_upstream_resolved_t    *resolved;

    ngx_buf_t                        from_client;

    ngx_buf_t                        buffer;
    off_t                            length;

    ngx_chain_t                     *out_bufs;    /* content to be sent downstream */
    ngx_chain_t                     *busy_bufs;
    ngx_chain_t                     *free_bufs;

    ngx_int_t                      (*input_filter_init)(void *data);
    ngx_int_t                      (*input_filter)(void *data, ssize_t bytes);
    void                            *input_filter_ctx;

#if (NGX_HTTP_CACHE)
    /* set to ngx_http_proxy_create_key by the proxy module */
    ngx_int_t                      (*create_key)(ngx_http_request_t *r);
#endif
    ngx_int_t                      (*create_request)(ngx_http_request_t *r);
    ngx_int_t                      (*reinit_request)(ngx_http_request_t *r);
    ngx_int_t                      (*process_header)(ngx_http_request_t *r);
    void                           (*abort_request)(ngx_http_request_t *r);
    void                           (*finalize_request)(ngx_http_request_t *r,
                                         ngx_int_t rc);
    ngx_int_t                      (*rewrite_redirect)(ngx_http_request_t *r,
                                         ngx_table_elt_t *h, size_t prefix);
    ngx_int_t                      (*rewrite_cookie)(ngx_http_request_t *r,
                                         ngx_table_elt_t *h);

    ngx_msec_t                       timeout;

    ngx_http_upstream_state_t       *state;

    ngx_str_t                        method;
    ngx_str_t                        schema;
    ngx_str_t                        uri;

#if (NGX_HTTP_SSL || NGX_COMPAT)
    ngx_str_t                        ssl_name;
#endif

    ngx_http_cleanup_pt             *cleanup;

    unsigned                         store:1;
    unsigned                         cacheable:1;
    unsigned                         accel:1;
    unsigned                         ssl:1;
#if (NGX_HTTP_CACHE)
    unsigned                         cache_status:3;
#endif

    unsigned                         buffering:1;   /* whether the response is buffered */
    unsigned                         keepalive:1;
    unsigned                         upgrade:1;

    unsigned                         request_sent:1;
    unsigned                         request_body_sent:1;
    unsigned                         header_sent:1;
};


struct ngx_peer_connection_s {
    ngx_connection_t                *connection;

    struct sockaddr                 *sockaddr;
    socklen_t                        socklen;
    ngx_str_t                       *name;

    ngx_uint_t                       tries;
    ngx_msec_t                       start_time;

    ngx_event_get_peer_pt            get;
    ngx_event_free_peer_pt           free;
    ngx_event_notify_peer_pt         notify;
    void                            *data;

#if (NGX_SSL || NGX_COMPAT)
    ngx_event_set_peer_session_pt    set_session;
    ngx_event_save_peer_session_pt   save_session;
#endif

    ngx_addr_t                      *local;

    int                              type;
    int                              rcvbuf;

    /* assigned from the original request's log */
    ngx_log_t                       *log;

    unsigned                         cached:1;
    unsigned                         transparent:1;

                                     /* ngx_connection_log_error_e */
    unsigned                         log_error:2;

    NGX_COMPAT_BEGIN(2)
    NGX_COMPAT_END
};


/*
 * The pipe linking upstream and downstream, used to relay the upstream
 * response to the client.
 */
struct ngx_event_pipe_s {
    ngx_connection_t  *upstream;      /* connection to the upstream */
    ngx_connection_t  *downstream;    /* connection to the downstream (client) */

    ngx_chain_t       *free_raw_bufs;
    ngx_chain_t       *in;            /* chain of response body bufs read so far (head) */
    ngx_chain_t      **last_in;       /* tail of the body chain, for appending */

    ngx_chain_t       *writing;

    ngx_chain_t       *out;
    ngx_chain_t       *free;
    ngx_chain_t       *busy;

    /*
     * the input filter i.e. that moves HTTP/1.1 chunks
     * from the raw bufs to an incoming chain
     */

    ngx_event_pipe_input_filter_pt    input_filter;   /* callback run on data read from the upstream */
    void                             *input_ctx;

    ngx_event_pipe_output_filter_pt   output_filter;  /* callback run before writing to the downstream */
    void                             *output_ctx;

#if (NGX_THREADS || NGX_COMPAT)
    ngx_int_t                       (*thread_handler)(ngx_thread_task_t *task,
                                                      ngx_file_t *file);
    void                             *thread_ctx;
    ngx_thread_task_t                *thread_task;
#endif

    unsigned           read:1;
    unsigned           cacheable:1;
    unsigned           single_buf:1;
    unsigned           free_bufs:1;
    unsigned           upstream_done:1;
    unsigned           upstream_error:1;
    unsigned           upstream_eof:1;
    unsigned           upstream_blocked:1;
    unsigned           downstream_done:1;
    unsigned           downstream_error:1;
    unsigned           cyclic_temp_file:1;
    unsigned           aio:1;

    ngx_int_t          allocated;     /* number of buffers allocated so far */
    ngx_bufs_t         bufs;          /* size and number of buffers (proxy_buffers) */
    ngx_buf_tag_t      tag;

    ssize_t            busy_size;

    off_t              read_length;   /* number of bytes already read */
    off_t              length;

    off_t              max_temp_file_size;
    ssize_t            temp_file_write_size;

    ngx_msec_t         read_timeout;
    ngx_msec_t         send_timeout;
    ssize_t            send_lowat;

    ngx_pool_t        *pool;
    ngx_log_t         *log;

    ngx_chain_t       *preread_bufs;
    size_t             preread_size;
    ngx_buf_t         *buf_to_file;

    size_t             limit_rate;
    time_t             start_sec;

    ngx_temp_file_t   *temp_file;

    /* STUB */ int     num;
};


typedef struct {
    ngx_list_t                       headers;

    ngx_uint_t                       status_n;
    ngx_str_t                        status_line;

    ngx_table_elt_t                 *status;
    ngx_table_elt_t                 *date;
    ngx_table_elt_t                 *server;
    ngx_table_elt_t                 *connection;

    ngx_table_elt_t                 *expires;
    ngx_table_elt_t                 *etag;
    ngx_table_elt_t                 *x_accel_expires;
    ngx_table_elt_t                 *x_accel_redirect;
    ngx_table_elt_t                 *x_accel_limit_rate;

    ngx_table_elt_t                 *content_type;
    ngx_table_elt_t                 *content_length;

    ngx_table_elt_t                 *last_modified;
    ngx_table_elt_t                 *location;
    ngx_table_elt_t                 *accept_ranges;
    ngx_table_elt_t                 *www_authenticate;
    ngx_table_elt_t                 *transfer_encoding;
    ngx_table_elt_t                 *vary;

#if (NGX_HTTP_GZIP)
    ngx_table_elt_t                 *content_encoding;
#endif

    ngx_array_t                      cache_control;
    ngx_array_t                      cookies;

    off_t                            content_length_n;    /* body length */
    time_t                           last_modified_time;  /* last modification time (used by HTTP caching) */

    unsigned                         connection_close:1;
    unsigned                         chunked:1;
} ngx_http_upstream_headers_in_t;
```
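The pipe's bufs field and the upstream header buffer tie back to the proxy buffering directives. A minimal sketch with made-up sizes:

```nginx
location / {
    proxy_pass         http://backend;
    proxy_buffering    on;
    proxy_buffer_size  4k;      # buffer for the response header (u->buffer)
    proxy_buffers      8 4k;    # buffers for the response body (pipe->bufs)
}
```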
Cache-related functions
```c
/*
 * Decide whether the request may be cached;
 * only cacheable requests can ever hit the cache.
 */
static ngx_int_t
ngx_http_upstream_cache(ngx_http_request_t *r, ngx_http_upstream_t *u)
{
    ngx_int_t               rc;
    ngx_http_cache_t       *c;
    ngx_http_file_cache_t  *cache;

    c = r->cache;

    if (c == NULL) {

        /* is the request method one of the configured cache methods? */
        if (!(r->method & u->conf->cache_methods)) {
            return NGX_DECLINED;
        }

        /*
         * should this request be cached?  on NGX_OK, "cache" is set to the
         * cache directory/zone to use
         */
        rc = ngx_http_upstream_cache_get(r, u, &cache);

        if (rc != NGX_OK) {
            return rc;
        }

        if (r->method == NGX_HTTP_HEAD && u->conf->cache_convert_head) {
            u->method = ngx_http_core_get_method;
        }

        /* create the cache structure and assign it to r->cache */
        if (ngx_http_file_cache_new(r) != NGX_OK) {
            return NGX_ERROR;
        }

        /* build the cache key; the proxy module sets ngx_http_proxy_create_key */
        if (u->create_key(r) != NGX_OK) {
            return NGX_ERROR;
        }

        /* TODO: add keys */

        /*
         * compute the crc32 and md5 of the key,
         * stored in r->cache->crc32 and r->cache->main respectively
         */
        ngx_http_file_cache_create_key(r);

        /*
         * header_start is set in ngx_http_file_cache_create_key(); it is the
         * size of the cache header stored in front of the cached response
         */
        if (r->cache->header_start + 256 >= u->conf->buffer_size) {
            ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                          "%V_buffer_size %uz is not enough for cache key, "
                          "it should be increased to at least %uz",
                          &u->conf->module, u->conf->buffer_size,
                          ngx_align(r->cache->header_start + 256, 1024));

            r->cache = NULL;
            return NGX_DECLINED;
        }

        /* the response may be cached */
        u->cacheable = 1;

        c = r->cache;

        c->body_start = u->conf->buffer_size;
        c->min_uses = u->conf->cache_min_uses;
        c->file_cache = cache;

        /* should the cache be bypassed? */
        switch (ngx_http_test_predicates(r, u->conf->cache_bypass)) {

        case NGX_ERROR:
            return NGX_ERROR;

        case NGX_DECLINED:
            /* bypass the cache */
            u->cache_status = NGX_HTTP_CACHE_BYPASS;
            return NGX_DECLINED;

        default: /* NGX_OK */
            break;
        }

        c->lock = u->conf->cache_lock;
        c->lock_timeout = u->conf->cache_lock_timeout;
        c->lock_age = u->conf->cache_lock_age;

        u->cache_status = NGX_HTTP_CACHE_MISS;
    }

    /* open the file holding the cached response, if it exists */
    rc = ngx_http_file_cache_open(r);

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "http upstream cache: %i", rc);

    switch (rc) {

    case NGX_HTTP_CACHE_STALE:

        if (((u->conf->cache_use_stale & NGX_HTTP_UPSTREAM_FT_UPDATING)
             || c->stale_updating) && !r->background
            && u->conf->cache_background_update)
        {
            r->cache->background = 1;
            u->cache_status = rc;
            rc = NGX_OK;
        }

        break;

    case NGX_HTTP_CACHE_UPDATING:

        if (((u->conf->cache_use_stale & NGX_HTTP_UPSTREAM_FT_UPDATING)
             || c->stale_updating) && !r->background)
        {
            u->cache_status = rc;
            rc = NGX_OK;

        } else {
            rc = NGX_HTTP_CACHE_STALE;
        }

        break;

    case NGX_OK:
        u->cache_status = NGX_HTTP_CACHE_HIT;
    }

    switch (rc) {

    case NGX_OK:

        return NGX_OK;

    case NGX_HTTP_CACHE_STALE:

        c->valid_sec = 0;
        c->updating_sec = 0;
        c->error_sec = 0;

        u->buffer.start = NULL;
        u->cache_status = NGX_HTTP_CACHE_EXPIRED;

        break;

    case NGX_DECLINED:

        if ((size_t) (u->buffer.end - u->buffer.start) < u->conf->buffer_size) {
            u->buffer.start = NULL;

        } else {
            u->buffer.pos = u->buffer.start + c->header_start;
            u->buffer.last = u->buffer.pos;
        }

        break;

    case NGX_HTTP_CACHE_SCARCE:

        /* not cached (yet) */
        u->cacheable = 0;

        break;

    case NGX_AGAIN:

        return NGX_BUSY;

    case NGX_ERROR:

        return NGX_ERROR;

    default:

        /* cached NGX_HTTP_BAD_GATEWAY, NGX_HTTP_GATEWAY_TIME_OUT, etc. */

        u->cache_status = NGX_HTTP_CACHE_HIT;

        return rc;
    }

    if (ngx_http_upstream_cache_check_range(r, u) == NGX_DECLINED) {
        u->cacheable = 0;
    }

    r->cached = 0;

    return NGX_DECLINED;
}


// ngx_http_upstream.c
static ngx_int_t
ngx_http_upstream_cache_get(ngx_http_request_t *r, ngx_http_upstream_t *u,
    ngx_http_file_cache_t **cache)
{
    ngx_str_t               *name, val;
    ngx_uint_t               i;
    ngx_http_file_cache_t  **caches;

    /*
     * cache_zone is set by the proxy_cache directive and references the
     * shared memory zone declared by proxy_cache_path.  If proxy_cache names
     * a zone that does not exist, ngx_http_proxy_merge_loc_conf() reports an
     * error because cache_zone->data is NULL.
     */
    if (u->conf->cache_zone) {
        /* the zone data is the structure set up by proxy_cache_path */
        *cache = u->conf->cache_zone->data;
        return NGX_OK;
    }

    /* the proxy_cache argument contained variables, not a literal zone name */
    if (ngx_http_complex_value(r, u->conf->cache_value, &val) != NGX_OK) {
        return NGX_ERROR;
    }

    if (val.len == 0
        || (val.len == 3 && ngx_strncmp(val.data, "off", 3) == 0))
    {
        /* caching disabled for this request */
        return NGX_DECLINED;
    }

    /*
     * u->caches is ngx_http_proxy_main_conf_t->caches, populated by the
     * proxy_cache_path directive; find the cache whose shared memory zone
     * name matches the evaluated value
     */
    caches = u->caches->elts;

    for (i = 0; i < u->caches->nelts; i++) {
        name = &caches[i]->shm_zone->shm.name;

        if (name->len == val.len
            && ngx_strncmp(name->data, val.data, val.len) == 0)
        {
            *cache = caches[i];
            return NGX_OK;
        }
    }

    ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                  "cache \"%V\" not found", &val);

    return NGX_ERROR;
}


// proxy module
// Builds the cache key, i.e. decides which fields form the unique index
// under which the response is cached.
static ngx_int_t
ngx_http_proxy_create_key(ngx_http_request_t *r)
{
    size_t                      len, loc_len;
    u_char                     *p;
    uintptr_t                   escape;
    ngx_str_t                  *key;
    ngx_http_upstream_t        *u;
    ngx_http_proxy_ctx_t       *ctx;
    ngx_http_proxy_loc_conf_t  *plcf;

    u = r->upstream;

    plcf = ngx_http_get_module_loc_conf(r, ngx_http_proxy_module);

    ctx = ngx_http_get_module_ctx(r, ngx_http_proxy_module);

    key = ngx_array_push(&r->cache->keys);
    if (key == NULL) {
        return NGX_ERROR;
    }

    /* the cache key configured by the proxy_cache_key directive */
    if (plcf->cache_key.value.data) {

        if (ngx_http_complex_value(r, &plcf->cache_key, key) != NGX_OK) {
            return NGX_ERROR;
        }

        return NGX_OK;
    }

    *key = ctx->vars.key_start;

    key = ngx_array_push(&r->cache->keys);
    if (key == NULL) {
        return NGX_ERROR;
    }

    if (plcf->proxy_lengths && ctx->vars.uri.len) {

        *key = ctx->vars.uri;
        u->uri = ctx->vars.uri;

        return NGX_OK;

    } else if (ctx->vars.uri.len == 0 && r->valid_unparsed_uri) {
        *key = r->unparsed_uri;
        u->uri = r->unparsed_uri;

        return NGX_OK;
    }

    loc_len = (r->valid_location && ctx->vars.uri.len) ? plcf->location.len : 0;

    if (r->quoted_uri || r->space_in_uri || r->internal) {
        escape = 2 * ngx_escape_uri(NULL, r->uri.data + loc_len,
                                    r->uri.len - loc_len, NGX_ESCAPE_URI);
    } else {
        escape = 0;
    }

    len = ctx->vars.uri.len + r->uri.len - loc_len + escape
          + sizeof("?") - 1 + r->args.len;

    p = ngx_pnalloc(r->pool, len);
    if (p == NULL) {
        return NGX_ERROR;
    }

    key->data = p;

    if (r->valid_location) {
        p = ngx_copy(p, ctx->vars.uri.data, ctx->vars.uri.len);
    }

    if (escape) {
        ngx_escape_uri(p, r->uri.data + loc_len,
                       r->uri.len - loc_len, NGX_ESCAPE_URI);
        p += r->uri.len - loc_len + escape;

    } else {
        p = ngx_copy(p, r->uri.data + loc_len, r->uri.len - loc_len);
    }

    if (r->args.len > 0) {
        *p++ = '?';
        p = ngx_copy(p, r->args.data, r->args.len);
    }

    key->len = p - key->data;
    u->uri = *key;

    return NGX_OK;
}


// ngx_http_file_cache.c
// Opens the file that holds the cached response.
ngx_int_t
ngx_http_file_cache_open(ngx_http_request_t *r)
{
    ngx_int_t                  rc, rv;
    ngx_uint_t                 test;
    ngx_http_cache_t          *c;
    ngx_pool_cleanup_t        *cln;
    ngx_open_file_info_t       of;
    ngx_http_file_cache_t     *cache;
    ngx_http_core_loc_conf_t  *clcf;

    c = r->cache;

    if (c->waiting) {
        return NGX_AGAIN;
    }

    /* already reading the cache file */
    if (c->reading) {
        return ngx_http_file_cache_read(r, c);
    }

    cache = c->file_cache;

    if (c->node == NULL) {
        cln = ngx_pool_cleanup_add(r->pool, 0);
        if (cln == NULL) {
            return NGX_ERROR;
        }

        cln->handler = ngx_http_file_cache_cleanup;
        cln->data = c;
    }

    rc = ngx_http_file_cache_exists(cache, c);

    ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "http file cache exists: %i e:%d", rc, c->exists);

    if (rc == NGX_ERROR) {
        return rc;
    }

    if (rc == NGX_AGAIN) {
        return NGX_HTTP_CACHE_SCARCE;
    }

    if (rc == NGX_OK) {

        if (c->error) {
            return c->error;
        }

        c->temp_file = 1;
        test = c->exists ? 1 : 0;
        rv = NGX_DECLINED;

    } else { /* rc == NGX_DECLINED */

        test = cache->sh->cold ? 1 : 0;

        if (c->min_uses > 1) {

            if (!test) {
                return NGX_HTTP_CACHE_SCARCE;
            }

            rv = NGX_HTTP_CACHE_SCARCE;

        } else {
            c->temp_file = 1;
            rv = NGX_DECLINED;
        }
    }

    /* build the cache file name into r->cache->file.name */
    if (ngx_http_file_cache_name(r, cache->path) != NGX_OK) {
        return NGX_ERROR;
    }

    if (!test) {
        goto done;
    }

    clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);

    ngx_memzero(&of, sizeof(ngx_open_file_info_t));

    of.uniq = c->uniq;
    of.valid = clcf->open_file_cache_valid;
    of.min_uses = clcf->open_file_cache_min_uses;
    of.events = clcf->open_file_cache_events;
    of.directio = NGX_OPEN_FILE_DIRECTIO_OFF;
    of.read_ahead = clcf->read_ahead;

    /* open the cache file */
    if (ngx_open_cached_file(clcf->open_file_cache, &c->file.name, &of, r->pool)
        != NGX_OK)
    {
        switch (of.err) {

        case 0:
            return NGX_ERROR;

        case NGX_ENOENT:
        case NGX_ENOTDIR:
            goto done;

        default:
            ngx_log_error(NGX_LOG_CRIT, r->connection->log, of.err,
                          ngx_open_file_n " \"%s\" failed", c->file.name.data);
            return NGX_ERROR;
        }
    }

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "http file cache fd: %d", of.fd);

    c->file.fd = of.fd;
    c->file.log = r->connection->log;
    c->uniq = of.uniq;
    c->length = of.size;
    c->fs_size = (of.fs_size + cache->bsize - 1) / cache->bsize;

    c->buf = ngx_create_temp_buf(r->pool, c->body_start);
    if (c->buf == NULL) {
        return NGX_ERROR;
    }

    return ngx_http_file_cache_read(r, c);

done:

    if (rv == NGX_DECLINED) {
        return ngx_http_file_cache_lock(r, c);
    }

    return rv;
}


// ngx_http_file_cache.c
// Checks whether this key is already cached; if not, allocates a node to
// hold the metadata of the entry about to be cached.
// Returns NGX_DECLINED: no cached entry.
// Returns NGX_OK:       a cached entry exists.
// Returns NGX_AGAIN:    an entry exists but cannot be used yet; go upstream.
static ngx_int_t
ngx_http_file_cache_exists(ngx_http_file_cache_t *cache, ngx_http_cache_t *c)
{
    ngx_int_t                    rc;
    ngx_http_file_cache_node_t  *fcn;

    ngx_shmtx_lock(&cache->shpool->mutex);

    fcn = c->node;

    if (fcn == NULL) {
        fcn = ngx_http_file_cache_lookup(cache, c->key);
    }

    if (fcn) {
        ngx_queue_remove(&fcn->queue);

        if (c->node == NULL) {
            fcn->uses++;
            fcn->count++;
        }

        if (fcn->error) {

            if (fcn->valid_sec < ngx_time()) {
                goto renew;
            }

            rc = NGX_OK;
            goto done;
        }

        if (fcn->exists || fcn->uses >= c->min_uses) {

            c->exists = fcn->exists;
            if (fcn->body_start) {
                c->body_start = fcn->body_start;
            }

            rc = NGX_OK;
            goto done;
        }

        rc = NGX_AGAIN;
        goto done;
    }

    fcn = ngx_slab_calloc_locked(cache->shpool,
                                 sizeof(ngx_http_file_cache_node_t));
    if (fcn == NULL) {
        ngx_http_file_cache_set_watermark(cache);

        ngx_shmtx_unlock(&cache->shpool->mutex);

        (void) ngx_http_file_cache_forced_expire(cache);

        ngx_shmtx_lock(&cache->shpool->mutex);

        fcn = ngx_slab_calloc_locked(cache->shpool,
                                     sizeof(ngx_http_file_cache_node_t));
        if (fcn == NULL) {
            ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, 0,
                          "could not allocate node%s", cache->shpool->log_ctx);
            rc = NGX_ERROR;
            goto failed;
        }
    }

    cache->sh->count++;

    ngx_memcpy((u_char *) &fcn->node.key, c->key, sizeof(ngx_rbtree_key_t));

    ngx_memcpy(fcn->key, &c->key[sizeof(ngx_rbtree_key_t)],
               NGX_HTTP_CACHE_KEY_LEN - sizeof(ngx_rbtree_key_t));

    ngx_rbtree_insert(&cache->sh->rbtree, &fcn->node);

    fcn->uses = 1;
    fcn->count = 1;

renew:

    rc = NGX_DECLINED;

    fcn->valid_msec = 0;
    fcn->error = 0;
    fcn->exists = 0;
    fcn->valid_sec = 0;
    fcn->uniq = 0;
    fcn->body_start = 0;
    fcn->fs_size = 0;

done:

    fcn->expire = ngx_time() + cache->inactive;

    ngx_queue_insert_head(&cache->sh->queue, &fcn->queue);

    c->uniq = fcn->uniq;
    c->error = fcn->error;
    c->node = fcn;

failed:

    ngx_shmtx_unlock(&cache->shpool->mutex);

    return rc;
}


// ngx_open_file_cache.c
ngx_int_t
ngx_open_cached_file(ngx_open_file_cache_t *cache, ngx_str_t *name,
    ngx_open_file_info_t *of, ngx_pool_t *pool)
{
    time_t                          now;
    uint32_t                        hash;
    ngx_int_t                       rc;
    ngx_file_info_t                 fi;
    ngx_pool_cleanup_t             *cln;
    ngx_cached_open_file_t         *file;
    ngx_pool_cleanup_file_t        *clnf;
    ngx_open_file_cache_cleanup_t  *ofcln;

    of->fd = NGX_INVALID_FILE;
    of->err = 0;

    if (cache == NULL) {

        if (of->test_only) {

            if (ngx_file_info_wrapper(name, of, &fi, pool->log)
                == NGX_FILE_ERROR)
            {
                return NGX_ERROR;
            }

            of->uniq = ngx_file_uniq(&fi);
            of->mtime = ngx_file_mtime(&fi);
            of->size = ngx_file_size(&fi);
            of->fs_size = ngx_file_fs_size(&fi);
            of->is_dir = ngx_is_dir(&fi);
            of->is_file = ngx_is_file(&fi);
            of->is_link = ngx_is_link(&fi);
            of->is_exec = ngx_is_exec(&fi);

            return NGX_OK;
        }

        cln = ngx_pool_cleanup_add(pool, sizeof(ngx_pool_cleanup_file_t));
        if (cln == NULL) {
            return NGX_ERROR;
        }

        rc = ngx_open_and_stat_file(name, of, pool->log);

        if (rc == NGX_OK && !of->is_dir) {
            cln->handler = ngx_pool_cleanup_file;
            clnf = cln->data;

            clnf->fd = of->fd;
            clnf->name = name->data;
            clnf->log = pool->log;
        }

        return rc;
    }

    cln = ngx_pool_cleanup_add(pool, sizeof(ngx_open_file_cache_cleanup_t));
    if (cln == NULL) {
        return NGX_ERROR;
    }

    now = ngx_time();

    hash = ngx_crc32_long(name->data, name->len);

    file = ngx_open_file_lookup(cache, name, hash);

    if (file) {

        file->uses++;

        ngx_queue_remove(&file->queue);

        if (file->fd == NGX_INVALID_FILE && file->err == 0 && !file->is_dir) {

            /* file was not used often enough to keep open */

            rc = ngx_open_and_stat_file(name, of, pool->log);

            if (rc != NGX_OK && (of->err == 0 || !of->errors)) {
                goto failed;
            }

            goto add_event;
        }

        if (file->use_event
            || (file->event == NULL
                && (of->uniq == 0 || of->uniq == file->uniq)
                && now - file->created < of->valid
#if (NGX_HAVE_OPENAT)
                && of->disable_symlinks == file->disable_symlinks
                && of->disable_symlinks_from == file->disable_symlinks_from
#endif
            ))
        {
            if (file->err == 0) {

                of->fd = file->fd;
                of->uniq = file->uniq;
                of->mtime = file->mtime;
                of->size = file->size;

                of->is_dir = file->is_dir;
                of->is_file = file->is_file;
                of->is_link = file->is_link;
                of->is_exec = file->is_exec;
                of->is_directio = file->is_directio;

                if (!file->is_dir) {
                    file->count++;
                    ngx_open_file_add_event(cache, file, of, pool->log);
                }

            } else {
                of->err = file->err;
#if (NGX_HAVE_OPENAT)
                of->failed = file->disable_symlinks ? ngx_openat_file_n
                                                    : ngx_open_file_n;
#else
                of->failed = ngx_open_file_n;
#endif
            }

            goto found;
        }

        ngx_log_debug4(NGX_LOG_DEBUG_CORE, pool->log, 0,
                       "retest open file: %s, fd:%d, c:%d, e:%d",
                       file->name, file->fd, file->count, file->err);

        if (file->is_dir) {

            /*
             * chances that directory became file are very small
             * so test_dir flag allows to use a single syscall
             * in ngx_file_info() instead of three syscalls
             */

            of->test_dir = 1;
        }

        of->fd = file->fd;
        of->uniq = file->uniq;

        rc = ngx_open_and_stat_file(name, of, pool->log);

        if (rc != NGX_OK && (of->err == 0 || !of->errors)) {
            goto failed;
        }

        if (of->is_dir) {

            if (file->is_dir || file->err) {
                goto update;
            }

            /* file became directory */

        } else if (of->err == 0) {  /* file */

            if (file->is_dir || file->err) {
                goto add_event;
            }

            if (of->uniq == file->uniq) {

                if (file->event) {
                    file->use_event = 1;
                }

                of->is_directio = file->is_directio;

                goto update;
            }

            /* file was changed */

        } else { /* error to cache */

            if (file->err || file->is_dir) {
                goto update;
            }

            /* file was removed, etc. */
        }

        if (file->count == 0) {

            ngx_open_file_del_event(file);

            if (ngx_close_file(file->fd) == NGX_FILE_ERROR) {
                ngx_log_error(NGX_LOG_ALERT, pool->log, ngx_errno,
                              ngx_close_file_n " \"%V\" failed", name);
            }

            goto add_event;
        }

        ngx_rbtree_delete(&cache->rbtree, &file->node);

        cache->current--;

        file->close = 1;

        goto create;
    }

    /* not found */

    rc = ngx_open_and_stat_file(name, of, pool->log);

    if (rc != NGX_OK && (of->err == 0 || !of->errors)) {
        goto failed;
    }

create:

    if (cache->current >= cache->max) {
        ngx_expire_old_cached_files(cache, 0, pool->log);
    }

    file = ngx_alloc(sizeof(ngx_cached_open_file_t), pool->log);

    if (file == NULL) {
        goto failed;
    }

    file->name = ngx_alloc(name->len + 1, pool->log);

    if (file->name == NULL) {
        ngx_free(file);
        file = NULL;
        goto failed;
    }

    ngx_cpystrn(file->name, name->data, name->len + 1);

    file->node.key = hash;

    ngx_rbtree_insert(&cache->rbtree, &file->node);

    cache->current++;

    file->uses = 1;
    file->count = 0;
    file->use_event = 0;
    file->event = NULL;

add_event:

    ngx_open_file_add_event(cache, file, of, pool->log);

update:

    file->fd = of->fd;
    file->err = of->err;
#if (NGX_HAVE_OPENAT)
    file->disable_symlinks = of->disable_symlinks;
    file->disable_symlinks_from = of->disable_symlinks_from;
#endif

    if (of->err == 0) {
        file->uniq = of->uniq;
        file->mtime = of->mtime;
        file->size = of->size;

        file->close = 0;

        file->is_dir = of->is_dir;
        file->is_file = of->is_file;
        file->is_link = of->is_link;
        file->is_exec = of->is_exec;
        file->is_directio = of->is_directio;

        if (!of->is_dir) {
            file->count++;
        }
    }

    file->created = now;

found:

    file->accessed = now;

    ngx_queue_insert_head(&cache->expire_queue, &file->queue);

    ngx_log_debug5(NGX_LOG_DEBUG_CORE, pool->log, 0,
                   "cached open file: %s, fd:%d, c:%d, e:%d, u:%d",
                   file->name, file->fd, file->count, file->err, file->uses);

    if (of->err == 0) {

        if (!of->is_dir) {
            cln->handler = ngx_open_file_cleanup;
            ofcln = cln->data;

            ofcln->cache = cache;
            ofcln->file = file;
            ofcln->min_uses = of->min_uses;
            ofcln->log = pool->log;
        }

        return NGX_OK;
    }

    return NGX_ERROR;

failed:

    if (file) {
        ngx_rbtree_delete(&cache->rbtree, &file->node);

        cache->current--;

        if (file->count == 0) {

            if (file->fd != NGX_INVALID_FILE) {
                if (ngx_close_file(file->fd) == NGX_FILE_ERROR) {
                    ngx_log_error(NGX_LOG_ALERT, pool->log, ngx_errno,
                                  ngx_close_file_n " \"%s\" failed",
                                  file->name);
                }
            }

            ngx_free(file->name);
            ngx_free(file);

        } else {
            file->close = 1;
        }
    }

    if (of->fd != NGX_INVALID_FILE) {
        if (ngx_close_file(of->fd) == NGX_FILE_ERROR) {
            ngx_log_error(NGX_LOG_ALERT, pool->log, ngx_errno,
                          ngx_close_file_n " \"%V\" failed", name);
        }
    }

    return NGX_ERROR;
}
```
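To tie the walkthrough together, here is a hypothetical configuration that exercises the code paths above; the zone name, upstream and all values are illustrative only:

```nginx
location / {
    proxy_pass                    http://backend;
    proxy_cache                   one;
    proxy_cache_key               $scheme$proxy_host$uri$is_args$args;  # consumed by ngx_http_proxy_create_key()
    proxy_cache_methods           GET HEAD;        # checked against u->conf->cache_methods
    proxy_cache_min_uses          2;               # c->min_uses / fcn->uses in ngx_http_file_cache_exists()
    proxy_cache_use_stale         updating error timeout;   # NGX_HTTP_CACHE_STALE / UPDATING branches
    proxy_cache_background_update on;              # sets r->cache->background = 1
    proxy_cache_lock              on;              # c->lock / ngx_http_file_cache_lock()
    proxy_cache_lock_timeout      5s;
}

# ngx_open_cached_file() consults the open_file_cache settings:
open_file_cache          max=1000 inactive=20s;
open_file_cache_valid    30s;
open_file_cache_min_uses 2;
```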