Open michael-newsrx opened 4 years ago
Full output from: http://192.168.1.129:8080/status/server
{
"ckpt_name": "bert_model.ckpt",
"client": "cd350caa-a83e-46c5-82e4-cbe06f9f3a7a",
"config_name": "bert_config.json",
"cors": "*",
"cpu": false,
"device_map": [],
"do_lower_case": true,
"fixed_embed_length": false,
"fp16": false,
"gpu_memory_fraction": 0.5,
"graph_tmp_dir": null,
"http_max_connect": 10,
"http_port": 8080,
"mask_cls_sep": false,
"max_batch_size": 256,
"max_seq_len": null,
"model_dir": "/model",
"no_position_embeddings": false,
"no_special_token": false,
"num_concurrent_socket": 8,
"num_process": 3,
"num_worker": 1,
"pooling_layer": [
-2
],
"pooling_strategy": 2,
"port": 5555,
"port_out": 5556,
"prefetch_size": 10,
"priority_batch_size": 16,
"python_version": "3.5.2 (default, Nov 23 2017, 16:37:01) \n[GCC 5.4.0 20160609]",
"pyzmq_version": "17.1.2",
"server_current_time": "2019-12-19 19:40:43.457178",
"server_start_time": "2019-12-19 19:37:52.060314",
"server_version": "1.9.9",
"show_tokens_to_client": true,
"statistic": {
"avg_last_two_interval": 16.154498574964236,
"avg_request_per_client": 2,
"avg_request_per_second": 0.06190226179782333,
"avg_size_per_request": 1,
"max_last_two_interval": 16.154498574964236,
"max_request_per_client": 2,
"max_request_per_second": 0.06190226179782333,
"max_size_per_request": 1,
"min_last_two_interval": 16.154498574964236,
"min_request_per_client": 2,
"min_request_per_second": 0.06190226179782333,
"min_size_per_request": 1,
"num_active_client": 1,
"num_data_request": 1,
"num_max_last_two_interval": 1,
"num_max_request_per_client": 1,
"num_max_request_per_second": 1,
"num_max_size_per_request": 1,
"num_min_last_two_interval": 1,
"num_min_request_per_client": 1,
"num_min_request_per_second": 1,
"num_min_size_per_request": 1,
"num_sys_request": 1,
"num_total_client": 1,
"num_total_request": 2,
"num_total_seq": 1
},
"status": 200,
"tensorflow_version": [
"1",
"12",
"0"
],
"tuned_model_dir": null,
"ventilator -> worker": [
"ipc://tmpQH1gxr/socket",
"ipc://tmpI4oRMI/socket",
"ipc://tmpCAYt2Z/socket",
"ipc://tmpqZx8hh/socket",
"ipc://tmpgp9Oxy/socket",
"ipc://tmpqRQxNP/socket",
"ipc://tmpuIEi36/socket",
"ipc://tmpM4Kgjo/socket"
],
"ventilator <-> sink": "ipc://tmp05TIha/socket",
"verbose": false,
"worker -> sink": "ipc://tmppA4sds/socket",
"xla": false,
"zmq_version": "4.2.5"
}
ps listing from inside the container clearly shows "-show_tokens_to_client" as CLI option:
/opt/docker$ docker exec -it bert-as-service /bin/bash
root@2c6ec6089cde:/app# ps axww
PID TTY STAT TIME COMMAND
1 pts/0 Ss+ 0:00 /bin/sh /app/entrypoint.sh 1
6 pts/0 Sl+ 0:05 /usr/bin/python3 /usr/local/bin/bert-serving-start -model_dir /model -max_seq_len NONE -show_tokens_to_client -http_port 8080 -num_worker=1
[x] Are you running the latest bert-as-service?
[x] Did you follow the installation and the usage instructions in README.md?
[x] Did you check the FAQ list in README.md?
[x] Did you perform a cursory search on existing issues?
System information
docker with custom entrypoint:
Description
and calling the server via http json post to patched http.py:
http.py is patched as follows:
Then this issue shows up:
No tokenization in the response. Log file shows:
However, http://192.168.1.129:8080/status/server shows:
...