apache / apisix

The Cloud-Native API Gateway
https://apisix.apache.org/blog/
Apache License 2.0
14.42k stars 2.5k forks source link

request help: Whether the active health check is not perfect? #4796

Closed kuberxy closed 2 years ago

kuberxy commented 3 years ago

Issue description

When the upstream network is unavailable or the upstream is unhealthy, apisix will still forward the request to the problem node. I did a test in two scenarios, as follows:

scene 1: upstream network is unavailable

nginx1(One of the upstream)

# config
mkdir /tmp/upstream
cd /tmp/upstream
mkdir logs/ conf/

tee conf/openresty.conf <<EOF
worker_processes  1;
error_log logs/error.log;

events {
    worker_connections 1024;
}

http {
    server {
        listen 8090;
        location / {
            default_type text/html;
            content_by_lua_block {
                ngx.say("<p>hello, world</p>")
            }
        }
    }
}
EOF

# start
sudo openresty -p `pwd`/ -c conf/openresty.conf

# drop packets
sudo iptables -A OUTPUT -p tcp --sport 8090 ! --dport 8090 -j DROP 

nginx2 and nginx3 (Other upstream)

# config
mkdir /tmp/upstream
cd /tmp/upstream
mkdir logs/ conf/

tee conf/openresty.conf <<EOF
worker_processes  1;
error_log logs/error.log;

events {
    worker_connections 1024;
}

http {
    server {
        listen 8090;
        location / {
            default_type text/html;
            content_by_lua_block {
                ngx.say("<p>hello, world</p>")
            }
        }
    }
}
EOF

# start
sudo openresty -p `pwd`/ -c conf/openresty.conf

use apisix as proxy

config.yaml:

apisix:
  config_center: yaml
  enable_admin: false
  enable_server_tokens: false
  enable_control: false
  node_listen: 80

nginx_config:  
  error_log_level: "error" 
  http: 
    access_log_format: '{"@timestamp":"$time_iso8601","client_ip":"$clientRealIp","accessip_list":"$proxy_add_x_forwarded_for","http_host":"$host","url":"$request_uri","http_referer":"$http_referer","user_agent":"$http_user_agent","method":"$request_method","status_code":"$status","total_bytes_sent":"$bytes_sent","body_bytes_sent":"$body_bytes_sent","request_time":"$request_time","upstream_response_time":"$upstream_time" }'
    access_log_format_escape: json
  http_configuration_snippet: |
    # lua timer limitations
    lua_max_pending_timers 40960;
    lua_max_running_timers 256;

    # access log variable
    map $http_x_forwarded_for $clientRealIp {
        "" $remote_addr;
        ~^(?P<firstAddr>[0-9\.]+),?.*$ $firstAddr;
    }

    map $upstream_response_time $upstream_time {
        default 0;
        ~^(?P<time>[0-9]+\.[0-9]+)$ $time;
    }

    # other conf
    http2_max_header_size 4000k;
    http2_max_field_size 4000k;

    proxy_read_timeout 240s;

    client_header_buffer_size 512k;
    large_client_header_buffers 4 512k;

apisix.yaml:

upstreams:
    -
        id: 1
        name: "nginx"
        nodes:
            "nginx1:8090": 1
            "nginx2:8090": 1
            "nginx3:8090": 1
        type: roundrobin
        checks:
            active:
                timeout: 5
                http_path: "/active"
                healthy:
                    http_statuses:
                    - 200
                    - 301
                    - 302
                    - 304
                    successes: 1
                    interval: 2
                unhealthy:
                    http_statuses:
                    - 500
                    - 502
                    - 503
                    - 504
                    http_failures: 2
                    interval: 1
            passive:
                healthy:
                    http_statuses:
                    - 200
                    - 206
                    - 301
                    - 302
                    - 304
                    successes: 3
                unhealthy:
                    http_statuses:
                    - 499
                    - 500
                    - 503
                    - 502
                    - 504
                    tcp_failures: 3
                    http_failures: 3

routes:
    -
        uri: /*
        upstream_id: 1

#END

The request was still forwarded to nginx1 and was retried after 60 seconds

$ time for i in $(seq 15); do curl 192.168.2.11;done
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>

real    1m0.276s
user    0m0.130s
sys 0m0.085s

use openresty as proxy

# config
mkdir /tmp/proxy
cd /tmp/proxy
mkdir logs/ conf/

tee conf/openresty.conf <<\EOF
#user  nobody;
#worker_processes  1;

error_log  logs/error.log;

pid        logs/nginx.pid;

events {
    worker_connections  1024;
}

http {
    default_type  application/octet-stream;

    # access log variable
    map $http_x_forwarded_for $clientRealIp {
        "" $remote_addr;
        ~^(?P<firstAddr>[0-9\.]+),?.*$ $firstAddr;
    }

    map $upstream_response_time $upstream_time {
        default 0;
        ~^(?P<time>[0-9]+\.[0-9]+)$ $time;
    }

    log_format json '{'
        '"@timestamp":"$time_iso8601",'
        '"client_ip":"$clientRealIp",'
        '"accessip_list":"$proxy_add_x_forwarded_for",'
        '"http_host":"$host",'
        '"url":"$request_uri",'
        '"http_referer":"$http_referer",'
        '"user_agent":"$http_user_agent",'
        '"method":"$request_method",'
        '"status_code":"$status",'
        '"total_bytes_sent":"$bytes_sent",'
        '"body_bytes_sent":"$body_bytes_sent",'
        '"request_time":"$request_time",'
        '"upstream_response_time":"$upstream_time"'
    '}';

    access_log  logs/access.log  json;

    sendfile        on;
    #tcp_nopush     on;

    #keepalive_timeout  0;
    keepalive_timeout  65;

    #gzip  on;

    # cy app conf
    http2_max_header_size 4000k;
    http2_max_field_size 4000k;

    proxy_read_timeout 240s;

    client_header_buffer_size 512k;
    large_client_header_buffers 4 512k;

upstream nginx {
    server nginx1:8090 weight=1 max_conns=0 max_fails=1 fail_timeout=10;
    server nginx2:8090 weight=1 max_conns=0 max_fails=1 fail_timeout=10;
    server nginx3:8090 weight=1 max_conns=0 max_fails=1 fail_timeout=10;

}

lua_shared_dict healthcheck 1m;

lua_socket_log_errors off;

init_worker_by_lua_block {
    local hc = require "resty.upstream.healthcheck"

    local ok, err = hc.spawn_checker{
        shm = "healthcheck",
        upstream = "nginx",
        type = "http",

        http_req = "GET /active HTTP/1.1\r\nHost: nginx\r\n\r\n",

        interval = 2000,
        timeout = 5000,
        fall = 3,
        rise = 2,
        valid_statuses = {200, 302},
        concurrency = 10,
    }

    if not ok then
        ngx.log(ngx.ERR, "failed to spawn health checker: ", err)
        return
    end
}

server {
    server_name 0.0.0.0;
    listen 12354;

    location = /status {
        access_log off;
        allow 127.0.0.1;
        deny all;

        default_type text/plain;
        content_by_lua_block {
            local hc = require "resty.upstream.healthcheck"
            ngx.print(hc.status_page())
        }
    }
}

server {
    server_name 0.0.0.0;
    listen      80;

    location / {
        proxy_pass http://nginx;
        proxy_set_header Host $http_host;
    }

}

}

EOF

# start
sudo openresty -p `pwd`/ -c conf/openresty.conf

The request is not forwarded to the upstream of the lost packet

$ time for i in $(seq 15); do curl 192.168.2.11;done
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>

real    0m0.201s
user    0m0.136s
sys 0m0.056s

scene 2: upstream is unhealthy

nginx1(One of the upstream)

# config
mkdir /tmp/upstream
cd /tmp/upstream
mkdir logs/ conf/

tee conf/openresty.conf <<EOF
worker_processes  1;
error_log logs/error.log;

events {
    worker_connections 1024;
}

http {
    server {
        listen 8090;

        location /active {
            default_type text/html;
            content_by_lua_block {
                ngx.exit(500)
            }
        }   

        location / {
            default_type text/html;
            content_by_lua_block {
                ngx.say("<p>healthy 500</p>")
            }
        }
    }
}
EOF

# start
sudo openresty -p `pwd`/ -c conf/openresty.conf

nginx2 and nginx3 (Other upstream)

Same configuration as above

use apisix as proxy

Same configuration as above

The request was still forwarded to nginx1

$ for i in $(seq 15); do curl 192.168.2.11;done
<p>healthy 500</p>
<p>healthy 500</p>
<p>hello, world</p>
<p>healthy 500</p>
<p>hello, world</p>
<p>hello, world</p>
<p>healthy 500</p>
<p>hello, world</p>
<p>hello, world</p>
<p>healthy 500</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>healthy 500</p>
<p>hello, world</p>

use openresty as proxy

Same configuration as above

The request is not forwarded to unhealthy upstream

$ for i in $(seq 15); do curl 192.168.2.11;done
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>

Environment

spacewander commented 3 years ago

I tried to reproduce it but failed.

Here is my upstream:

worker_processes 1;
daemon off;
error_log stderr warn;
events {
    worker_connections 65535;
}
http {
    access_log off;
    resolver 114.114.114.114;
    server {
        listen 1980;
        location /active {
            default_type text/html;
            content_by_lua_block {
                ngx.exit(500)
            }
        }

        location / {
            default_type text/html;
            content_by_lua_block {
                ngx.say("<p>healthy 500</p>")
            }
        }
    }
    server {
        listen 8090;
        location / {
            default_type text/html;
            content_by_lua_block {
                ngx.say("<p>hello, world</p>")
            }
        }
    }
}

My conf:

apisix:
  config_center: yaml
  enable_admin: false
  enable_server_tokens: false
  enable_control: false

nginx_config:
  error_log_level: "warn"
  http:
    access_log_format: '{"@timestamp":"$time_iso8601","client_ip":"$clientRealIp","accessip_list":"$proxy_add_x_forwarded_for","http_host":"$host","url":"$request_uri","http_referer":"$http_referer","user_agent":"$http_user_agent","method":"$request_method","status_code":"$status","total_bytes_sent":"$bytes_sent","body_bytes_sent":"$body_bytes_sent","request_time":"$request_time","upstream_response_time":"$upstream_time" }'
    access_log_format_escape: json
  http_configuration_snippet: |
    # lua timer limitations
    lua_max_pending_timers 40960;
    lua_max_running_timers 256;

    # access log variable
    map $http_x_forwarded_for $clientRealIp {
        "" $remote_addr;
        ~^(?P<firstAddr>[0-9\.]+),?.*$ $firstAddr;
    }

    map $upstream_response_time $upstream_time {
        default 0;
        ~^(?P<time>[0-9]+\.[0-9]+)$ $time;
    }

    # other conf
    http2_max_header_size 4000k;
    http2_max_field_size 4000k;

    proxy_read_timeout 240s;

    client_header_buffer_size 512k;
    large_client_header_buffers 4 512k;
upstreams:
    -
        id: 1
        name: "nginx"
        nodes:
            "127.0.0.1:1980": 1
            "127.0.0.1:8090": 1
            "127.0.0.2:8090": 1
        type: roundrobin
        checks:
            active:
                timeout: 5
                http_path: "/active"
                healthy:
                    http_statuses:
                    - 200
                    - 301
                    - 302
                    - 304
                    successes: 1
                    interval: 2
                unhealthy:
                    http_statuses:
                    - 500
                    - 502
                    - 503
                    - 504
                    http_failures: 2
                    interval: 1
            passive:
                healthy:
                    http_statuses:
                    - 200
                    - 206
                    - 301
                    - 302
                    - 304
                    successes: 3
                unhealthy:
                    http_statuses:
                    - 499
                    - 500
                    - 503
                    - 502
                    - 504
                    tcp_failures: 3
                    http_failures: 3

routes:
    -
        uri: /*
        upstream_id: 1

#END

I start to try it once I see this log:

2021/08/11 12:05:19 [warn] 10803#10803: *1438 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (2/2) for '(127.0.0.1:1980)', context: ngx.timer, client: 127.0.0.1, server: 0.0.0.0:9080

And the result...

 ¥ time for i in $(seq 15); do curl 127.0.0.1:9080;done
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
 ¥ time for i in $(seq 15); do curl 127.0.0.1:9080;sleep 1;done
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
<p>hello, world</p>
tokers commented 3 years ago

@kuberxy Have you ever seen some related logs in APISIX error.log?

kuberxy commented 3 years ago

@spacewander Your test is correct. When I change the hostname to IP, it is normal, but why can't I use the hostname?

kuberxy commented 3 years ago

@tokers I change the log level to warn, The log I see is like this:

2021/08/12 09:14:38 [warn] 6521#6521: *220612 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:14:40 [warn] 6509#6509: *221369 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:14:42 [warn] 6509#6509: *222085 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (2/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:14:46 [warn] 6512#6512: *223383 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:14:48 [warn] 6510#6510: *224098 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (2/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:15:09 [warn] 6520#6520: *230901 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:15:11 [warn] 6520#6520: *231551 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (2/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:15:15 [warn] 6517#6517: *232944 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:15:16 [warn] 6509#6509: *233183 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (1/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:15:17 [warn] 6509#6509: *233552 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:15:18 [warn] 6508#6508: *234044 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (1/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:15:18 [warn] 6511#6511: *234054 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (2/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:15:18 [warn] 6519#6519: *234064 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (3/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:15:18 [warn] 6514#6514: *234087 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (4/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:15:18 [warn] 6510#6510: *234089 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (5/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:15:19 [warn] 6509#6509: *234301 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:15:21 [warn] 6509#6509: *235010 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (2/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:16:10 [warn] 6514#6514: *250882 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:16:10 [warn] 6509#6509: *250933 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (1/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:12 [warn] 6509#6509: *251650 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:16:13 [warn] 6514#6514: *251946 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (1/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:13 [warn] 6511#6511: *251949 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (2/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:14 [warn] 6509#6509: *252326 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:16:14 [warn] 6512#6512: *252532 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (1/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:14 [warn] 6513#6513: *252547 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (2/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:14 [warn] 6519#6519: *252554 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (3/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:14 [warn] 6520#6520: *252564 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (4/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:16 [warn] 6509#6509: *252958 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:16:16 [warn] 6507#6507: *252997 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (1/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:16 [warn] 6521#6521: *253032 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (2/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:16 [warn] 6510#6510: *253080 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (3/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:18 [warn] 6509#6509: *253703 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:16:19 [warn] 6520#6520: *254125 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (1/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:19 [warn] 6517#6517: *254139 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (2/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:20 [warn] 6518#6518: *254277 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:16:21 [warn] 6515#6515: *254674 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (1/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:21 [warn] 6511#6511: *254676 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (2/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:21 [warn] 6506#6506: *254682 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (3/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:22 [warn] 6518#6518: *255005 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:16:23 [warn] 6507#6507: *255263 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (1/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:23 [warn] 6519#6519: *255280 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (2/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:23 [warn] 6514#6514: *255285 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (3/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:23 [warn] 6515#6515: *255289 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (4/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:24 [warn] 6521#6521: *255719 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:16:25 [warn] 6510#6510: *255959 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (1/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:25 [warn] 6511#6511: *255984 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (2/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:25 [warn] 6508#6508: *255998 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (3/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:26 [warn] 6521#6521: *256326 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:16:26 [warn] 6512#6512: *256452 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (1/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:26 [warn] 6520#6520: *256456 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (2/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:26 [warn] 6507#6507: *256482 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (3/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:26 [warn] 6510#6510: *256483 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (4/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:28 [warn] 6521#6521: *257066 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:16:29 [warn] 6518#6518: *257301 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (1/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:29 [warn] 6514#6514: *257303 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (2/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:29 [warn] 6516#6516: *257312 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (3/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:29 [warn] 6521#6521: *257322 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (4/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:30 [warn] 6521#6521: *257639 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:16:32 [warn] 6506#6506: *258352 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (2/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
spacewander commented 3 years ago
2021/08/12 09:16:24 [warn] 6521#6521: *255719 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:16:25 [warn] 6510#6510: *255959 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (1/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:25 [warn] 6511#6511: *255984 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (2/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:25 [warn] 6508#6508: *255998 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (3/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:16:26 [warn] 6521#6521: *256326 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80

It seems your upstream doesn't return failure every time?

kuberxy commented 3 years ago

@spacewander Yes. This phenomenon occurs when the upstream nodes are specified by host name instead of IP. You can reproduce it with the following configuration

# apisix.yaml
upstreams:
    -
        id: 1
        name: "nginx"
        nodes:
            "nginx1:1980": 1
            "nginx2:8090": 1
            "nginx3:8090": 1

# /etc/hosts
127.0.0.1 nginx1
127.0.0.2 nginx2
127.0.0.3 nginx3

# client
for i in $(seq 2); do curl 127.0.0.1;done
for i in $(seq 20); do curl 127.0.0.1;done
for i in $(seq 200); do curl 127.0.0.1;done
for i in $(seq 2000); do curl 127.0.0.1;done
tokers commented 3 years ago

Are you sure the resolving results of nginx1, nginx2 and nginx3 are always the same?

kuberxy commented 3 years ago

@tokers There are four hosts in my test environment, the actual configuration of the proxy node is like this

# apisix.yaml
upstreams:
    -
        id: 1
        name: "nginx"
        nodes:
            "nginx1:8090": 1
            "nginx2:8090": 1
            "nginx3:8090": 1

# /etc/hosts
192.168.2.12 nginx1
192.168.2.13 nginx2
192.168.2.14 nginx3

# client
for i in $(seq 2); do curl 192.168.2.11;done
for i in $(seq 20); do curl 192.168.2.11;done
for i in $(seq 200); do curl 192.168.2.11;done
for i in $(seq 2000); do curl 192.168.2.11;done

When the hostname is used in the upstream, the health check will have a problem

tokers commented 3 years ago

Just like your error logs showed, the host name was resolved to an IP, but for that same node the health check result is not consistent.

2021/08/12 09:15:19 [warn] 6509#6509: *234301 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:15:21 [warn] 6509#6509: *235010 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (2/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:16:10 [warn] 6514#6514: *250882 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) unhealthy HTTP increment (1/2) for '(192.168.2.12:8090)', context: ngx.timer, client: 192.168.3.26, server: 0.0.0.0:80
2021/08/12 09:16:10 [warn] 6509#6509: *250933 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (1/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"

Could you try sending the health check probes yourself with the curl utility and see what results you get?

tzssangglass commented 3 years ago

take a look at: https://github.com/apache/apisix/pull/4663/files#diff-7df2f564c90d5b03a916952ccdfffa15e845dd236ec26a15d4fa6ce3222b7259R721-R729

points to check:

  1. is the active health check enabled?
  2. is the http_path in the active health check correct?
kuberxy commented 3 years ago

@tokers The path for health check is /active(return 500), and the path for client access is /(return 200)

2021/08/12 09:15:18 [warn] 6508#6508: *234044 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (1/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:15:18 [warn] 6511#6511: *234054 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (2/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:15:18 [warn] 6519#6519: *234064 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (3/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:15:18 [warn] 6514#6514: *234087 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (4/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:15:18 [warn] 6510#6510: *234089 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (5/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"

Here is a passive health check.

In addition, I don't quite understand your question. After I changed the host name to IP, the health check was normal. And when I use openresty as a proxy, even if the upstream uses the host name, there is no such problem.

If you suspect that there is a problem with the hostname resolution in my environment, you can test it yourself

tokers commented 3 years ago

@tokers The path for health check is /active(return 500), and the path for client access is /(return 200)

2021/08/12 09:15:18 [warn] 6508#6508: *234044 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (1/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:15:18 [warn] 6511#6511: *234054 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (2/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:15:18 [warn] 6519#6519: *234064 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (3/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:15:18 [warn] 6514#6514: *234087 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (4/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"
2021/08/12 09:15:18 [warn] 6510#6510: *234089 [lua] healthcheck.lua:1107: log(): [healthcheck] (upstream#/upstreams/1) healthy SUCCESS increment (5/3) for '(192.168.2.12:8090)' while logging request, client: 192.168.3.26, server: , request: "GET / HTTP/1.1", upstream: "http://192.168.2.12:8090/", host: "192.168.2.11"

Here is a passive health check.

In addition, I don't quite understand your question. After I changed the host name to IP, the health check was normal. And when I use openresty as a proxy, even if the upstream uses the host name, there is no such problem.

If you suspect that there is a problem with the hostname resolution in my environment, you can test it yourself

I mean, just using curl to send the health check requests to your backends and see what happens.

kuberxy commented 3 years ago

@tokers

$ curl nginx1:8090/active
<html>
<head><title>500 Internal Server Error</title></head>
<body>
<center><h1>500 Internal Server Error</h1></center>
<hr><center>openresty/1.15.8.3</center>
</body>
</html>

$ curl nginx2:8090/active
<p>hello, world</p>

$ curl nginx3:8090/active
<p>hello, world</p>
kuberxy commented 3 years ago

@tzssangglass The access logs of nginx1, nginx2 and nginx3 are like this

# nginx1
$ tail -f logs/access.log
192.168.2.11 - - [16/Aug/2021:09:10:33 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:35 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:36 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:38 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:39 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:40 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:42 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:43 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:44 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:45 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:46 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:48 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:50 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:51 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:52 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:54 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:56 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:58 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:59 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:01 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:02 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:04 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:05 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:06 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:08 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:10 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:11 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:12 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:13 +0800] "GET /active HTTP/1.0" 500 183 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:13 +0800] "GET / HTTP/1.1" 200 30 "-" "curl/7.58.0"
192.168.2.11 - - [16/Aug/2021:09:11:13 +0800] "GET / HTTP/1.1" 200 30 "-" "curl/7.58.0"
192.168.2.11 - - [16/Aug/2021:09:11:13 +0800] "GET / HTTP/1.1" 200 30 "-" "curl/7.58.0"
...

# nginx2
$ tail -f logs/access.log
192.168.2.11 - - [16/Aug/2021:09:10:33 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:35 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:39 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:41 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:43 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:45 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:49 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:53 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:57 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:59 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:01 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:03 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:05 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:09 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:11 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:13 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:13 +0800] "GET / HTTP/1.1" 200 31 "-" "curl/7.58.0"
192.168.2.11 - - [16/Aug/2021:09:11:13 +0800] "GET / HTTP/1.1" 200 31 "-" "curl/7.58.0"
192.168.2.11 - - [16/Aug/2021:09:11:13 +0800] "GET / HTTP/1.1" 200 31 "-" "curl/7.58.0"
...

# nginx3
$ tail -f logs/access.log
192.168.2.11 - - [16/Aug/2021:09:10:33 +0800] "GET / HTTP/1.1" 200 31 "-" "curl/7.58.0"
192.168.2.11 - - [16/Aug/2021:09:10:33 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:35 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:39 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:41 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:43 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:45 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:49 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:53 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:57 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:10:59 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:01 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:03 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:05 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:09 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:11 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:13 +0800] "GET /active HTTP/1.0" 200 20 "-" "-"
192.168.2.11 - - [16/Aug/2021:09:11:13 +0800] "GET / HTTP/1.1" 200 31 "-" "curl/7.58.0"
192.168.2.11 - - [16/Aug/2021:09:11:13 +0800] "GET / HTTP/1.1" 200 31 "-" "curl/7.58.0"
192.168.2.11 - - [16/Aug/2021:09:11:13 +0800] "GET / HTTP/1.1" 200 31 "-" "curl/7.58.0"
...
github-actions[bot] commented 2 years ago

This issue has been marked as stale due to 350 days of inactivity. It will be closed in 2 weeks if no further activity occurs. If this issue is still relevant, please simply write any comment. Even if closed, you can still revive the issue at any time or discuss it on the dev@apisix.apache.org list. Thank you for your contributions.

github-actions[bot] commented 2 years ago

This issue has been closed due to lack of activity. If you think that is incorrect, or the issue requires additional review, you can revive the issue at any time.