YKG / hptc-shichendahai

2 stars 2 forks source link

执行测试详细日志 verbose #7

Open YKG opened 4 years ago

YKG commented 4 years ago
# Print a header with a timestamp so separate runs can be told apart in the log.
echo "TiDB sysbench test "; date

# apt update — refresh the package index before installing anything.

sudo apt update

# install tiup (TiDB cluster manager).  The installer appends its bin dir
# to ~/.bashrc, so re-source it to pick up the PATH change.

curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh
source "$HOME/.bashrc"  # was 'source .bashrc' — only worked when cwd happened to be $HOME
which tiup
tiup cluster          # first invocation downloads the 'cluster' component
tiup --binary cluster # print the resolved binary path as a sanity check

# install mysql client (any MySQL-compatible client works for TiDB)

#sudo apt install mysql-client-core-5.7 -y

sudo apt install mariadb-client-core-10.3 -y # ubuntu 20.04

# install sysbench from the upstream packagecloud repository

curl -s https://packagecloud.io/install/repositories/akopytov/sysbench/script.deb.sh | sudo bash
sudo apt -y install sysbench

# format & mount ssd on every TiKV host.
# WARNING: mkfs is destructive — it wipes /dev/vdb on each host.
# qingdao

for host in 172.29.3.188 172.29.3.189 172.29.3.190; do
  # mkdir -p so a re-run on an already-prepared host does not fail
  ssh "$host" "mkfs -t ext4 /dev/vdb; mkdir -p /ssd; chmod 777 /ssd; mount /dev/vdb /ssd; lsblk"
done

# create cluster topo.yml file
# Topology for TiUP: one mixed node (.187) hosting pd, tidb, prometheus and
# grafana, plus three dedicated tikv nodes (.188-.190).  All deploy/data
# dirs live under the freshly mounted /ssd.
# NOTE: the quoted "EOF" delimiter disables shell expansion inside the
# heredoc, so the YAML below is written to topo.yml verbatim.

cat > topo.yml << "EOF"
global:
  user: "tidb"
  ssh_port: 22
  deploy_dir: "/ssd/tidb-deploy"
  data_dir: "/ssd/tidb-data"

pd_servers:
  - host: 172.29.3.187

tidb_servers:
  - host: 172.29.3.187

tikv_servers:
  - host: 172.29.3.188
  - host: 172.29.3.189
  - host: 172.29.3.190

monitoring_servers:
  - host: 172.29.3.187

grafana_servers:
  - host: 172.29.3.187
EOF

# deploy & start cluster
# Tear down any leftover cluster named 'aaa' first so the deploy starts
# from a clean slate.  Pass --yes to clean/destroy: without it they stop
# at an interactive "Do you want to continue? [y/N]" prompt, which hangs
# an unattended run (deploy already passed -y; clean/destroy did not).

tiup cluster clean aaa --all --yes
tiup cluster destroy aaa --yes
tiup cluster deploy aaa v4.0.6 ./topo.yml -y
tiup cluster start aaa

# mysql> create database sbtest
# set global variables // enable optimistic transaction mode and allow
# automatic retry of optimistic transactions for the prepare phase.

mysql -h 127.0.0.1 -P 4000 -uroot -e "create database sbtest;set global tidb_disable_txn_auto_retry=off;set global tidb_txn_mode='optimistic';"

# sysbench prepare
# Shared sysbench settings (connection, duration, reporting).  The quoted
# "EOF" delimiter writes the file verbatim, with no shell expansion.
# NOTE(review): threads=16 here governs 'prepare'; the 'run' step below
# overrides it with --threads=900 on the command line.

cat > config << "EOF"
mysql-host=127.0.0.1
mysql-port=4000
mysql-user=root
mysql-password=
mysql-db=sbtest
time=150
threads=16
report-interval=10
db-driver=mysql
rand-type=uniform
EOF

echo "Start sysbench oltp_update_index --config-file=config prepare --tables=16 --table-size=1000000 "
df -h  # disk usage before prepare, for before/after comparison
date
time sysbench oltp_update_index --config-file=config prepare --tables=16 --table-size=1000000
date
df -h  # disk usage after prepare
echo "Prepare done"

# enable pessimistic transaction mode before the measured run
# (the prepare phase above ran in optimistic mode)

mysql -h 127.0.0.1 -P 4000 -uroot -e "set global tidb_txn_mode='pessimistic';"

# sysbench run — the measured workload: oltp_update_index at 900 threads
# for the time= duration set in the config file.

echo "Start sysbench oltp_update_index --config-file=config --threads=900 run --tables=16 --table-size=1000000 "
df -h  # disk usage before the run
date
time sysbench oltp_update_index --config-file=config --threads=900 run --tables=16 --table-size=1000000
date
df -h  # disk usage after the run
echo "Run done"
root@node3:~# ./a.sh
Starting component `cluster`: /root/.tiup/components/cluster/v1.1.2/tiup-cluster destroy aaa
This operation will destroy tidb v4.0.6 cluster aaa and its data.
Do you want to continue? [y/N]: y
Destroying cluster...
+ [ Serial ] - SSHKeySet: privateKey=/root/.tiup/storage/cluster/clusters/aaa/ssh/id_rsa, publicKey=/root/.tiup/storage/cluster/clusters/aaa/ssh/id_rsa.pub
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.189
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.188
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.190
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [ Serial ] - StopCluster
Stopping component grafana
        Stopping instance 172.29.3.187
        Stop grafana 172.29.3.187:3000 success
Stopping component prometheus
        Stopping instance 172.29.3.187
        Stop prometheus 172.29.3.187:9090 success
Stopping component tidb
        Stopping instance 172.29.3.187
        Stop tidb 172.29.3.187:4000 success
Stopping component tikv
        Stopping instance 172.29.3.188
        Stopping instance 172.29.3.189
        Stopping instance 172.29.3.190
        Stop tikv 172.29.3.188:20160 success
        Stop tikv 172.29.3.189:20160 success
        Stop tikv 172.29.3.190:20160 success
Stopping component node_exporter
Stopping component blackbox_exporter
Stopping component node_exporter
Stopping component blackbox_exporter
Stopping component node_exporter
Stopping component blackbox_exporter
Stopping component pd
        Stopping instance 172.29.3.187
        Stop pd 172.29.3.187:2379 success
Stopping component node_exporter
Stopping component blackbox_exporter
+ [ Serial ] - DestroyCluster
Destroying component grafana
Destroying instance 172.29.3.187
Deleting paths on 172.29.3.187: /tidb-deploy/grafana-3000 /etc/systemd/system/grafana-3000.service
Destroy 172.29.3.187 success
- Destroy grafana paths: [/tidb-deploy/grafana-3000 /etc/systemd/system/grafana-3000.service]
Destroying component prometheus
Destroying instance 172.29.3.187
Deleting paths on 172.29.3.187: /tidb-data/prometheus-9090 /tidb-deploy/prometheus-9090/log /tidb-deploy/prometheus-9090 /etc/systemd/system/prometheus-9090.service
Destroy 172.29.3.187 success
- Destroy prometheus paths: [/tidb-deploy/prometheus-9090 /etc/systemd/system/prometheus-9090.service /tidb-data/prometheus-9090 /tidb-deploy/prometheus-9090/log]
Destroying component tidb
Destroying instance 172.29.3.187
Deleting paths on 172.29.3.187: /tidb-deploy/tidb-4000/log /tidb-deploy/tidb-4000 /etc/systemd/system/tidb-4000.service
Destroy 172.29.3.187 success
- Destroy tidb paths: [/tidb-deploy/tidb-4000 /etc/systemd/system/tidb-4000.service /tidb-deploy/tidb-4000/log]
Destroying component tikv
Destroying instance 172.29.3.188
Deleting paths on 172.29.3.188: /tidb-data/tikv-20160 /tidb-deploy/tikv-20160/log /tidb-deploy/tikv-20160 /etc/systemd/system/tikv-20160.service
Destroy 172.29.3.188 success
- Destroy tikv paths: [/etc/systemd/system/tikv-20160.service /tidb-data/tikv-20160 /tidb-deploy/tikv-20160/log /tidb-deploy/tikv-20160]
Destroying instance 172.29.3.189
Deleting paths on 172.29.3.189: /etc/systemd/system/tikv-20160.service /tidb-data/tikv-20160 /tidb-deploy/tikv-20160/log /tidb-deploy/tikv-20160
Destroy 172.29.3.189 success
- Destroy tikv paths: [/tidb-data/tikv-20160 /tidb-deploy/tikv-20160/log /tidb-deploy/tikv-20160 /etc/systemd/system/tikv-20160.service]
Destroying instance 172.29.3.190
Deleting paths on 172.29.3.190: /tidb-data/tikv-20160 /tidb-deploy/tikv-20160/log /tidb-deploy/tikv-20160 /etc/systemd/system/tikv-20160.service
Destroy 172.29.3.190 success
- Destroy tikv paths: [/tidb-data/tikv-20160 /tidb-deploy/tikv-20160/log /tidb-deploy/tikv-20160 /etc/systemd/system/tikv-20160.service]
Destroying monitored 172.29.3.188
Destroying monitored
        Destroying instance 172.29.3.188
Destroy monitored on 172.29.3.188 success
Destroying monitored 172.29.3.189
Destroying monitored
        Destroying instance 172.29.3.189
Destroy monitored on 172.29.3.189 success
Destroying monitored 172.29.3.190
Destroying monitored
        Destroying instance 172.29.3.190
Destroy monitored on 172.29.3.190 success
Destroying component pd
Destroying instance 172.29.3.187
Deleting paths on 172.29.3.187: /tidb-data/pd-2379 /tidb-deploy/pd-2379/log /tidb-deploy/pd-2379 /etc/systemd/system/pd-2379.service
Destroy 172.29.3.187 success
- Destroy pd paths: [/tidb-data/pd-2379 /tidb-deploy/pd-2379/log /tidb-deploy/pd-2379 /etc/systemd/system/pd-2379.service]
Destroying monitored 172.29.3.187
Destroying monitored
        Destroying instance 172.29.3.187
Destroy monitored on 172.29.3.187 success
Destroyed cluster `aaa` successfully
Starting component `cluster`: /root/.tiup/components/cluster/v1.1.2/tiup-cluster deploy aaa v4.0.6 ./topo.yml -y
+ Generate SSH keys ... Done
+ Download TiDB components
  - Download pd:v4.0.6 (linux/amd64) ... Done
  - Download tikv:v4.0.6 (linux/amd64) ... Done
  - Download tidb:v4.0.6 (linux/amd64) ... Done
  - Download prometheus:v4.0.6 (linux/amd64) ... Done
  - Download grafana:v4.0.6 (linux/amd64) ... Done
  - Download node_exporter:v0.17.0 (linux/amd64) ... Done
  - Download blackbox_exporter:v0.12.0 (linux/amd64) ... Done
+ Initialize target host environments
  - Prepare 172.29.3.187:22 ... Done
  - Prepare 172.29.3.188:22 ... Done
  - Prepare 172.29.3.189:22 ... Done
  - Prepare 172.29.3.190:22 ... Done
+ Copy files
  - Copy pd -> 172.29.3.187 ... Done
  - Copy tikv -> 172.29.3.188 ... Done
  - Copy tikv -> 172.29.3.189 ... Done
  - Copy tikv -> 172.29.3.190 ... Done
  - Copy tidb -> 172.29.3.187 ... Done
  - Copy prometheus -> 172.29.3.187 ... Done
  - Copy grafana -> 172.29.3.187 ... Done
  - Copy node_exporter -> 172.29.3.187 ... Done
  - Copy node_exporter -> 172.29.3.188 ... Done
  - Copy node_exporter -> 172.29.3.189 ... Done
  - Copy node_exporter -> 172.29.3.190 ... Done
  - Copy blackbox_exporter -> 172.29.3.187 ... Done
  - Copy blackbox_exporter -> 172.29.3.188 ... Done
  - Copy blackbox_exporter -> 172.29.3.189 ... Done
  - Copy blackbox_exporter -> 172.29.3.190 ... Done
+ Check status
Deployed cluster `aaa` successfully, you can start the cluster via `tiup cluster start aaa`
Starting component `cluster`: /root/.tiup/components/cluster/v1.1.2/tiup-cluster start aaa
Starting cluster aaa...
+ [ Serial ] - SSHKeySet: privateKey=/root/.tiup/storage/cluster/clusters/aaa/ssh/id_rsa, publicKey=/root/.tiup/storage/cluster/clusters/aaa/ssh/id_rsa.pub
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.189
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.188
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.190
+ [ Serial ] - StartCluster
Starting component pd
        Starting instance pd 172.29.3.187:2379
        Start pd 172.29.3.187:2379 success
Starting component node_exporter
        Starting instance 172.29.3.187
        Start 172.29.3.187 success
Starting component blackbox_exporter
        Starting instance 172.29.3.187
        Start 172.29.3.187 success
Starting component tikv
        Starting instance tikv 172.29.3.190:20160
        Starting instance tikv 172.29.3.188:20160
        Starting instance tikv 172.29.3.189:20160
        Start tikv 172.29.3.190:20160 success
        Start tikv 172.29.3.188:20160 success
        Start tikv 172.29.3.189:20160 success
Starting component node_exporter
        Starting instance 172.29.3.188
        Start 172.29.3.188 success
Starting component blackbox_exporter
        Starting instance 172.29.3.188
        Start 172.29.3.188 success
Starting component node_exporter
        Starting instance 172.29.3.189
        Start 172.29.3.189 success
Starting component blackbox_exporter
        Starting instance 172.29.3.189
        Start 172.29.3.189 success
Starting component node_exporter
        Starting instance 172.29.3.190
        Start 172.29.3.190 success
Starting component blackbox_exporter
        Starting instance 172.29.3.190
        Start 172.29.3.190 success
Starting component tidb
        Starting instance tidb 172.29.3.187:4000
        Start tidb 172.29.3.187:4000 success
Starting component prometheus
        Starting instance prometheus 172.29.3.187:9090
        Start prometheus 172.29.3.187:9090 success
Starting component grafana
        Starting instance grafana 172.29.3.187:3000
        Start grafana 172.29.3.187:3000 success
+ [ Serial ] - UpdateTopology: cluster=aaa
Started cluster `aaa` successfully
Start sysbench oltp_common --config-file=config prepare --tables=16 --table-size=100000
Filesystem      Size  Used Avail Use% Mounted on
udev             16G     0   16G   0% /dev
tmpfs           3.2G  736K  3.2G   1% /run
/dev/vda1        40G  4.2G   34G  12% /
tmpfs            16G     0   16G   0% /dev/shm
tmpfs           5.0M     0  5.0M   0% /run/lock
tmpfs            16G     0   16G   0% /sys/fs/cgroup
tmpfs           3.2G     0  3.2G   0% /run/user/0
/dev/vdb        879G  683M  834G   1% /ssd
tmpfs           3.2G     0  3.2G   0% /run/user/1000
Mon 21 Sep 2020 09:04:45 AM CST
sysbench 1.0.20 (using bundled LuaJIT 2.1.0-beta2)

Initializing worker threads...

Creating table 'sbtest8'...Creating table 'sbtest10'...

Creating table 'sbtest5'...
Creating table 'sbtest7'...
Creating table 'sbtest1'...
Creating table 'sbtest3'...
Creating table 'sbtest9'...Creating table 'sbtest13'...

Creating table 'sbtest12'...Creating table 'sbtest6'...
Creating table 'sbtest2'...Creating table 'sbtest14'...
Creating table 'sbtest11'...

Creating table 'sbtest15'...
Creating table 'sbtest4'...
Creating table 'sbtest16'...
Inserting 100000 records into 'sbtest10'
Inserting 100000 records into 'sbtest2'
Inserting 100000 records into 'sbtest12'
Inserting 100000 records into 'sbtest6'
Inserting 100000 records into 'sbtest4'
Inserting 100000 records into 'sbtest8'
Inserting 100000 records into 'sbtest13'
Inserting 100000 records into 'sbtest16'
Inserting 100000 records into 'sbtest7'
Inserting 100000 records into 'sbtest5'
Inserting 100000 records into 'sbtest3'
Inserting 100000 records into 'sbtest14'
Inserting 100000 records into 'sbtest15'
Inserting 100000 records into 'sbtest1'
Inserting 100000 records into 'sbtest11'
Inserting 100000 records into 'sbtest9'
Creating a secondary index on 'sbtest10'...
Creating a secondary index on 'sbtest11'...
Creating a secondary index on 'sbtest2'...
Creating a secondary index on 'sbtest4'...
Creating a secondary index on 'sbtest16'...
Creating a secondary index on 'sbtest12'...
Creating a secondary index on 'sbtest6'...
Creating a secondary index on 'sbtest13'...
Creating a secondary index on 'sbtest3'...
Creating a secondary index on 'sbtest14'...
Creating a secondary index on 'sbtest8'...
Creating a secondary index on 'sbtest15'...
Creating a secondary index on 'sbtest5'...
Creating a secondary index on 'sbtest1'...
Creating a secondary index on 'sbtest7'...
Creating a secondary index on 'sbtest9'...

real    1m44.923s
user    0m3.385s
sys     0m0.185s
Mon 21 Sep 2020 09:06:30 AM CST
Filesystem      Size  Used Avail Use% Mounted on
udev             16G     0   16G   0% /dev
tmpfs           3.2G  728K  3.2G   1% /run
/dev/vda1        40G  4.2G   34G  12% /
tmpfs            16G     0   16G   0% /dev/shm
tmpfs           5.0M     0  5.0M   0% /run/lock
tmpfs            16G     0   16G   0% /sys/fs/cgroup
tmpfs           3.2G     0  3.2G   0% /run/user/0
/dev/vdb        879G  693M  834G   1% /ssd
Prepare done
Start sysbench oltp_update_index --config-file=config --threads=900 run --tables=16 --table-size=100000
Filesystem      Size  Used Avail Use% Mounted on
udev             16G     0   16G   0% /dev
tmpfs           3.2G  728K  3.2G   1% /run
/dev/vda1        40G  4.2G   34G  12% /
tmpfs            16G     0   16G   0% /dev/shm
tmpfs           5.0M     0  5.0M   0% /run/lock
tmpfs            16G     0   16G   0% /sys/fs/cgroup
tmpfs           3.2G     0  3.2G   0% /run/user/0
/dev/vdb        879G  693M  834G   1% /ssd
Mon 21 Sep 2020 09:06:30 AM CST
sysbench 1.0.20 (using bundled LuaJIT 2.1.0-beta2)

Running the test with following options:
Number of threads: 900
Report intermediate results every 10 second(s)
Initializing random number generator from current time

Initializing worker threads...

Threads started!

[ 10s ] thds: 900 tps: 17913.40 qps: 17913.40 (r/w/o: 0.00/17913.40/0.00) lat (ms,95%): 94.10 err/s: 0.00 reconn/s: 0.00
[ 20s ] thds: 900 tps: 19173.53 qps: 19173.53 (r/w/o: 0.00/19173.53/0.00) lat (ms,95%): 89.16 err/s: 0.00 reconn/s: 0.00
[ 30s ] thds: 900 tps: 19279.52 qps: 19279.52 (r/w/o: 0.00/19279.52/0.00) lat (ms,95%): 92.42 err/s: 0.00 reconn/s: 0.00
[ 40s ] thds: 900 tps: 19123.84 qps: 19123.84 (r/w/o: 0.00/19123.84/0.00) lat (ms,95%): 90.78 err/s: 0.00 reconn/s: 0.00
[ 50s ] thds: 900 tps: 19475.55 qps: 19475.55 (r/w/o: 0.00/19475.55/0.00) lat (ms,95%): 89.16 err/s: 0.00 reconn/s: 0.00
[ 60s ] thds: 900 tps: 19563.38 qps: 19563.38 (r/w/o: 0.00/19563.38/0.00) lat (ms,95%): 87.56 err/s: 0.00 reconn/s: 0.00
[ 70s ] thds: 900 tps: 19425.80 qps: 19425.80 (r/w/o: 0.00/19425.80/0.00) lat (ms,95%): 90.78 err/s: 0.00 reconn/s: 0.00
root@node3:~# tiup cluster clean aaa --all
Starting component `cluster`: /root/.tiup/components/cluster/v1.1.2/tiup-cluster clean aaa --all
This operation will clean tidb v4.0.6 cluster aaa's data and log.
Nodes will be ignored: []
Roles will be ignored: []
Do you want to continue? [y/N]: y
Cleanup cluster...
+ [ Serial ] - SSHKeySet: privateKey=/root/.tiup/storage/cluster/clusters/aaa/ssh/id_rsa, publicKey=/root/.tiup/storage/cluster/clusters/aaa/ssh/id_rsa.pub
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.190
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.189
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.188
+ [ Serial ] - StopCluster
Stopping component grafana
root@node3:~# ./b.sh
Starting component `cluster`: /root/.tiup/components/cluster/v1.1.2/tiup-cluster clean aaa --all
This operation will clean tidb v4.0.6 cluster aaa's data and log.
Nodes will be ignored: []
Roles will be ignored: []
Do you want to continue? [y/N]: y
Cleanup cluster...
+ [ Serial ] - SSHKeySet: privateKey=/root/.tiup/storage/cluster/clusters/aaa/ssh/id_rsa, publicKey=/root/.tiup/storage/cluster/clusters/aaa/ssh/id_rsa.pub
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.189
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.190
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.188
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [ Serial ] - StopCluster
Stopping component grafana
        Stopping instance 172.29.3.187
        Stop grafana 172.29.3.187:3000 success
Stopping component prometheus
        Stopping instance 172.29.3.187
        Stop prometheus 172.29.3.187:9090 success
Stopping component tidb
        Stopping instance 172.29.3.187
        Stop tidb 172.29.3.187:4000 success
Stopping component tikv
        Stopping instance 172.29.3.190
        Stopping instance 172.29.3.188
        Stopping instance 172.29.3.189
        Stop tikv 172.29.3.188:20160 success
        Stop tikv 172.29.3.190:20160 success
        Stop tikv 172.29.3.189:20160 success
Stopping component node_exporter
Stopping component blackbox_exporter
Stopping component node_exporter
Stopping component blackbox_exporter
Stopping component node_exporter
Stopping component blackbox_exporter
Stopping component pd
        Stopping instance 172.29.3.187
        Stop pd 172.29.3.187:2379 success
Stopping component node_exporter
Stopping component blackbox_exporter
+ [ Serial ] - CleanupCluster
Cleanup instance 172.29.3.187
Deleting paths on 172.29.3.187: /ssd/tidb-deploy/grafana-3000/log/*
Cleanup 172.29.3.187 success
- Clanup grafana files: [/ssd/tidb-deploy/grafana-3000/log/*]
Cleanup instance 172.29.3.187
Deleting paths on 172.29.3.187: /ssd/tidb-data/prometheus-9090/* /ssd/tidb-deploy/prometheus-9090/log/*
Cleanup 172.29.3.187 success
- Clanup prometheus files: [/ssd/tidb-data/prometheus-9090/* /ssd/tidb-deploy/prometheus-9090/log/*]
Cleanup instance 172.29.3.187
Deleting paths on 172.29.3.187: /ssd/tidb-deploy/tidb-4000/log/*
Cleanup 172.29.3.187 success
- Clanup tidb files: [/ssd/tidb-deploy/tidb-4000/log/*]
Cleanup instance 172.29.3.188
Deleting paths on 172.29.3.188: /ssd/tidb-data/tikv-20160/* /ssd/tidb-deploy/tikv-20160/log/*
Cleanup 172.29.3.188 success
- Clanup tikv files: [/ssd/tidb-data/tikv-20160/* /ssd/tidb-deploy/tikv-20160/log/*]
Cleanup instance 172.29.3.189
Deleting paths on 172.29.3.189: /ssd/tidb-data/tikv-20160/* /ssd/tidb-deploy/tikv-20160/log/*
Cleanup 172.29.3.189 success
- Clanup tikv files: [/ssd/tidb-data/tikv-20160/* /ssd/tidb-deploy/tikv-20160/log/*]
Cleanup instance 172.29.3.190
Deleting paths on 172.29.3.190: /ssd/tidb-data/tikv-20160/* /ssd/tidb-deploy/tikv-20160/log/*
Cleanup 172.29.3.190 success
- Clanup tikv files: [/ssd/tidb-data/tikv-20160/* /ssd/tidb-deploy/tikv-20160/log/*]
Cleanup instance 172.29.3.187
Deleting paths on 172.29.3.187: /ssd/tidb-data/pd-2379/* /ssd/tidb-deploy/pd-2379/log/*
Cleanup 172.29.3.187 success
- Clanup pd files: [/ssd/tidb-data/pd-2379/* /ssd/tidb-deploy/pd-2379/log/*]
Cleanup cluster `aaa` successfully
Starting component `cluster`: /root/.tiup/components/cluster/v1.1.2/tiup-cluster destroy aaa
This operation will destroy tidb v4.0.6 cluster aaa and its data.
Do you want to continue? [y/N]: y
Destroying cluster...
+ [ Serial ] - SSHKeySet: privateKey=/root/.tiup/storage/cluster/clusters/aaa/ssh/id_rsa, publicKey=/root/.tiup/storage/cluster/clusters/aaa/ssh/id_rsa.pub
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.189
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.188
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.190
+ [ Serial ] - StopCluster
Stopping component grafana
        Stopping instance 172.29.3.187
        Stop grafana 172.29.3.187:3000 success
Stopping component prometheus
        Stopping instance 172.29.3.187
        Stop prometheus 172.29.3.187:9090 success
Stopping component tidb
        Stopping instance 172.29.3.187
        Stop tidb 172.29.3.187:4000 success
Stopping component tikv
        Stopping instance 172.29.3.190
        Stopping instance 172.29.3.188
        Stopping instance 172.29.3.189
        Stop tikv 172.29.3.189:20160 success
        Stop tikv 172.29.3.190:20160 success
        Stop tikv 172.29.3.188:20160 success
Stopping component node_exporter
Stopping component blackbox_exporter
Stopping component node_exporter
Stopping component blackbox_exporter
Stopping component node_exporter
Stopping component blackbox_exporter
Stopping component pd
        Stopping instance 172.29.3.187
        Stop pd 172.29.3.187:2379 success
Stopping component node_exporter
Stopping component blackbox_exporter
+ [ Serial ] - DestroyCluster
Destroying component grafana
Destroying instance 172.29.3.187
Deleting paths on 172.29.3.187: /ssd/tidb-deploy/grafana-3000 /etc/systemd/system/grafana-3000.service
Destroy 172.29.3.187 success
- Destroy grafana paths: [/ssd/tidb-deploy/grafana-3000 /etc/systemd/system/grafana-3000.service]
Destroying component prometheus
Destroying instance 172.29.3.187
Deleting paths on 172.29.3.187: /ssd/tidb-data/prometheus-9090 /ssd/tidb-deploy/prometheus-9090/log /ssd/tidb-deploy/prometheus-9090 /etc/systemd/system/prometheus-9090.service
Destroy 172.29.3.187 success
- Destroy prometheus paths: [/ssd/tidb-data/prometheus-9090 /ssd/tidb-deploy/prometheus-9090/log /ssd/tidb-deploy/prometheus-9090 /etc/systemd/system/prometheus-9090.service]
Destroying component tidb
Destroying instance 172.29.3.187
Deleting paths on 172.29.3.187: /ssd/tidb-deploy/tidb-4000/log /ssd/tidb-deploy/tidb-4000 /etc/systemd/system/tidb-4000.service
Destroy 172.29.3.187 success
- Destroy tidb paths: [/ssd/tidb-deploy/tidb-4000/log /ssd/tidb-deploy/tidb-4000 /etc/systemd/system/tidb-4000.service]
Destroying component tikv
Destroying instance 172.29.3.188
Deleting paths on 172.29.3.188: /ssd/tidb-data/tikv-20160 /ssd/tidb-deploy/tikv-20160/log /ssd/tidb-deploy/tikv-20160 /etc/systemd/system/tikv-20160.service
Destroy 172.29.3.188 success
- Destroy tikv paths: [/ssd/tidb-deploy/tikv-20160 /etc/systemd/system/tikv-20160.service /ssd/tidb-data/tikv-20160 /ssd/tidb-deploy/tikv-20160/log]
Destroying instance 172.29.3.189
Deleting paths on 172.29.3.189: /ssd/tidb-deploy/tikv-20160 /etc/systemd/system/tikv-20160.service /ssd/tidb-data/tikv-20160 /ssd/tidb-deploy/tikv-20160/log
Destroy 172.29.3.189 success
- Destroy tikv paths: [/ssd/tidb-data/tikv-20160 /ssd/tidb-deploy/tikv-20160/log /ssd/tidb-deploy/tikv-20160 /etc/systemd/system/tikv-20160.service]
Destroying instance 172.29.3.190
Deleting paths on 172.29.3.190: /ssd/tidb-data/tikv-20160 /ssd/tidb-deploy/tikv-20160/log /ssd/tidb-deploy/tikv-20160 /etc/systemd/system/tikv-20160.service
Destroy 172.29.3.190 success
- Destroy tikv paths: [/ssd/tidb-deploy/tikv-20160/log /ssd/tidb-deploy/tikv-20160 /etc/systemd/system/tikv-20160.service /ssd/tidb-data/tikv-20160]
Destroying monitored 172.29.3.188
Destroying monitored
        Destroying instance 172.29.3.188
Destroy monitored on 172.29.3.188 success
Destroying monitored 172.29.3.189
Destroying monitored
        Destroying instance 172.29.3.189
Destroy monitored on 172.29.3.189 success
Destroying monitored 172.29.3.190
Destroying monitored
        Destroying instance 172.29.3.190
Destroy monitored on 172.29.3.190 success
Destroying component pd
Destroying instance 172.29.3.187
Deleting paths on 172.29.3.187: /ssd/tidb-deploy/pd-2379/log /ssd/tidb-deploy/pd-2379 /etc/systemd/system/pd-2379.service /ssd/tidb-data/pd-2379
Destroy 172.29.3.187 success
- Destroy pd paths: [/ssd/tidb-data/pd-2379 /ssd/tidb-deploy/pd-2379/log /ssd/tidb-deploy/pd-2379 /etc/systemd/system/pd-2379.service]
Destroying monitored 172.29.3.187
Destroying monitored
        Destroying instance 172.29.3.187
Destroy monitored on 172.29.3.187 success
Destroyed cluster `aaa` successfully
Starting component `cluster`: /root/.tiup/components/cluster/v1.1.2/tiup-cluster deploy aaa v4.0.6 ./topo.yml -y
+ Generate SSH keys ... Done
+ Download TiDB components
  - Download pd:v4.0.6 (linux/amd64) ... Done
  - Download tikv:v4.0.6 (linux/amd64) ... Done
  - Download tidb:v4.0.6 (linux/amd64) ... Done
  - Download prometheus:v4.0.6 (linux/amd64) ... Done
  - Download grafana:v4.0.6 (linux/amd64) ... Done
  - Download node_exporter:v0.17.0 (linux/amd64) ... Done
  - Download blackbox_exporter:v0.12.0 (linux/amd64) ... Done
+ Initialize target host environments
  - Prepare 172.29.3.187:22 ... Done
  - Prepare 172.29.3.188:22 ... Done
  - Prepare 172.29.3.189:22 ... Done
  - Prepare 172.29.3.190:22 ... Done
+ Copy files
  - Copy pd -> 172.29.3.187 ... Done
  - Copy tikv -> 172.29.3.188 ... Done
  - Copy tikv -> 172.29.3.189 ... Done
  - Copy tikv -> 172.29.3.190 ... Done
  - Copy tidb -> 172.29.3.187 ... Done
  - Copy prometheus -> 172.29.3.187 ... Done
  - Copy grafana -> 172.29.3.187 ... Done
  - Copy node_exporter -> 172.29.3.187 ... Done
  - Copy node_exporter -> 172.29.3.188 ... Done
  - Copy node_exporter -> 172.29.3.189 ... Done
  - Copy node_exporter -> 172.29.3.190 ... Done
  - Copy blackbox_exporter -> 172.29.3.188 ... Done
  - Copy blackbox_exporter -> 172.29.3.189 ... Done
  - Copy blackbox_exporter -> 172.29.3.190 ... Done
  - Copy blackbox_exporter -> 172.29.3.187 ... Done
+ Check status
Deployed cluster `aaa` successfully, you can start the cluster via `tiup cluster start aaa`
Starting component `cluster`: /root/.tiup/components/cluster/v1.1.2/tiup-cluster start aaa
Starting cluster aaa...
+ [ Serial ] - SSHKeySet: privateKey=/root/.tiup/storage/cluster/clusters/aaa/ssh/id_rsa, publicKey=/root/.tiup/storage/cluster/clusters/aaa/ssh/id_rsa.pub
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.189
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.188
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.190
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [ Serial ] - StartCluster
Starting component pd
        Starting instance pd 172.29.3.187:2379
        Start pd 172.29.3.187:2379 success
Starting component node_exporter
        Starting instance 172.29.3.187
        Start 172.29.3.187 success
Starting component blackbox_exporter
        Starting instance 172.29.3.187
        Start 172.29.3.187 success
Starting component tikv
        Starting instance tikv 172.29.3.190:20160
        Starting instance tikv 172.29.3.189:20160
        Starting instance tikv 172.29.3.188:20160
        Start tikv 172.29.3.189:20160 success
        Start tikv 172.29.3.190:20160 success
        Start tikv 172.29.3.188:20160 success
Starting component node_exporter
        Starting instance 172.29.3.188
        Start 172.29.3.188 success
Starting component blackbox_exporter
        Starting instance 172.29.3.188
        Start 172.29.3.188 success
Starting component node_exporter
        Starting instance 172.29.3.189
        Start 172.29.3.189 success
Starting component blackbox_exporter
        Starting instance 172.29.3.189
        Start 172.29.3.189 success
Starting component node_exporter
        Starting instance 172.29.3.190
        Start 172.29.3.190 success
Starting component blackbox_exporter
        Starting instance 172.29.3.190
        Start 172.29.3.190 success
Starting component tidb
        Starting instance tidb 172.29.3.187:4000
        Start tidb 172.29.3.187:4000 success
Starting component prometheus
        Starting instance prometheus 172.29.3.187:9090
        Start prometheus 172.29.3.187:9090 success
Starting component grafana
        Starting instance grafana 172.29.3.187:3000
        Start grafana 172.29.3.187:3000 success
+ [ Serial ] - UpdateTopology: cluster=aaa
root@node3:~# ./c.sh
Starting component `cluster`: /root/.tiup/components/cluster/v1.1.2/tiup-cluster clean aaa --all
This operation will clean tidb v4.0.6 cluster aaa's data and log.
Nodes will be ignored: []
Roles will be ignored: []
Do you want to continue? [y/N]: y
Cleanup cluster...
+ [ Serial ] - SSHKeySet: privateKey=/root/.tiup/storage/cluster/clusters/aaa/ssh/id_rsa, publicKey=/root/.tiup/storage/cluster/clusters/aaa/ssh/id_rsa.pub
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.189
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.190
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.188
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [ Serial ] - StopCluster
Stopping component grafana
        Stopping instance 172.29.3.187
        Stop grafana 172.29.3.187:3000 success
Stopping component prometheus
        Stopping instance 172.29.3.187
        Stop prometheus 172.29.3.187:9090 success
Stopping component tidb
        Stopping instance 172.29.3.187
        Stop tidb 172.29.3.187:4000 success
Stopping component tikv
        Stopping instance 172.29.3.190
        Stopping instance 172.29.3.188
        Stopping instance 172.29.3.189
        Stop tikv 172.29.3.190:20160 success
        Stop tikv 172.29.3.188:20160 success
        Stop tikv 172.29.3.189:20160 success
Stopping component node_exporter
Stopping component blackbox_exporter
Stopping component node_exporter
Stopping component blackbox_exporter
Stopping component node_exporter
Stopping component blackbox_exporter
Stopping component pd
        Stopping instance 172.29.3.187
        Stop pd 172.29.3.187:2379 success
Stopping component node_exporter
Stopping component blackbox_exporter
+ [ Serial ] - CleanupCluster
Cleanup instance 172.29.3.187
Deleting paths on 172.29.3.187: /ssd/tidb-deploy/grafana-3000/log/*
Cleanup 172.29.3.187 success
- Clanup grafana files: [/ssd/tidb-deploy/grafana-3000/log/*]
Cleanup instance 172.29.3.187
Deleting paths on 172.29.3.187: /ssd/tidb-data/prometheus-9090/* /ssd/tidb-deploy/prometheus-9090/log/*
Cleanup 172.29.3.187 success
- Clanup prometheus files: [/ssd/tidb-data/prometheus-9090/* /ssd/tidb-deploy/prometheus-9090/log/*]
Cleanup instance 172.29.3.187
Deleting paths on 172.29.3.187: /ssd/tidb-deploy/tidb-4000/log/*
Cleanup 172.29.3.187 success
- Clanup tidb files: [/ssd/tidb-deploy/tidb-4000/log/*]
Cleanup instance 172.29.3.188
Deleting paths on 172.29.3.188: /ssd/tidb-data/tikv-20160/* /ssd/tidb-deploy/tikv-20160/log/*
Cleanup 172.29.3.188 success
- Clanup tikv files: [/ssd/tidb-data/tikv-20160/* /ssd/tidb-deploy/tikv-20160/log/*]
Cleanup instance 172.29.3.189
Deleting paths on 172.29.3.189: /ssd/tidb-deploy/tikv-20160/log/* /ssd/tidb-data/tikv-20160/*
Cleanup 172.29.3.189 success
- Clanup tikv files: [/ssd/tidb-data/tikv-20160/* /ssd/tidb-deploy/tikv-20160/log/*]
Cleanup instance 172.29.3.190
Deleting paths on 172.29.3.190: /ssd/tidb-deploy/tikv-20160/log/* /ssd/tidb-data/tikv-20160/*
Cleanup 172.29.3.190 success
- Clanup tikv files: [/ssd/tidb-data/tikv-20160/* /ssd/tidb-deploy/tikv-20160/log/*]
Cleanup instance 172.29.3.187
Deleting paths on 172.29.3.187: /ssd/tidb-data/pd-2379/* /ssd/tidb-deploy/pd-2379/log/*
Cleanup 172.29.3.187 success
- Clanup pd files: [/ssd/tidb-data/pd-2379/* /ssd/tidb-deploy/pd-2379/log/*]
Cleanup cluster `aaa` successfully
Starting component `cluster`: /root/.tiup/components/cluster/v1.1.2/tiup-cluster destroy aaa
This operation will destroy tidb v4.0.6 cluster aaa and its data.
Do you want to continue? [y/N]: y
Destroying cluster...
+ [ Serial ] - SSHKeySet: privateKey=/root/.tiup/storage/cluster/clusters/aaa/ssh/id_rsa, publicKey=/root/.tiup/storage/cluster/clusters/aaa/ssh/id_rsa.pub
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.189
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.188
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.190
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [ Serial ] - StopCluster
Stopping component grafana
        Stopping instance 172.29.3.187
        Stop grafana 172.29.3.187:3000 success
Stopping component prometheus
        Stopping instance 172.29.3.187
        Stop prometheus 172.29.3.187:9090 success
Stopping component tidb
        Stopping instance 172.29.3.187
        Stop tidb 172.29.3.187:4000 success
Stopping component tikv
        Stopping instance 172.29.3.190
        Stopping instance 172.29.3.189
        Stopping instance 172.29.3.188
        Stop tikv 172.29.3.188:20160 success
        Stop tikv 172.29.3.190:20160 success
        Stop tikv 172.29.3.189:20160 success
Stopping component node_exporter
Stopping component blackbox_exporter
Stopping component node_exporter
Stopping component blackbox_exporter
Stopping component node_exporter
Stopping component blackbox_exporter
Stopping component pd
        Stopping instance 172.29.3.187
        Stop pd 172.29.3.187:2379 success
Stopping component node_exporter
Stopping component blackbox_exporter
+ [ Serial ] - DestroyCluster
Destroying component grafana
Destroying instance 172.29.3.187
Deleting paths on 172.29.3.187: /ssd/tidb-deploy/grafana-3000 /etc/systemd/system/grafana-3000.service
Destroy 172.29.3.187 success
- Destroy grafana paths: [/ssd/tidb-deploy/grafana-3000 /etc/systemd/system/grafana-3000.service]
Destroying component prometheus
Destroying instance 172.29.3.187
Deleting paths on 172.29.3.187: /ssd/tidb-data/prometheus-9090 /ssd/tidb-deploy/prometheus-9090/log /ssd/tidb-deploy/prometheus-9090 /etc/systemd/system/prometheus-9090.service
Destroy 172.29.3.187 success
- Destroy prometheus paths: [/etc/systemd/system/prometheus-9090.service /ssd/tidb-data/prometheus-9090 /ssd/tidb-deploy/prometheus-9090/log /ssd/tidb-deploy/prometheus-9090]
Destroying component tidb
Destroying instance 172.29.3.187
Deleting paths on 172.29.3.187: /ssd/tidb-deploy/tidb-4000/log /ssd/tidb-deploy/tidb-4000 /etc/systemd/system/tidb-4000.service
Destroy 172.29.3.187 success
- Destroy tidb paths: [/ssd/tidb-deploy/tidb-4000/log /ssd/tidb-deploy/tidb-4000 /etc/systemd/system/tidb-4000.service]
Destroying component tikv
Destroying instance 172.29.3.188
Deleting paths on 172.29.3.188: /ssd/tidb-deploy/tikv-20160/log /ssd/tidb-deploy/tikv-20160 /etc/systemd/system/tikv-20160.service /ssd/tidb-data/tikv-20160
Destroy 172.29.3.188 success
- Destroy tikv paths: [/ssd/tidb-deploy/tikv-20160/log /ssd/tidb-deploy/tikv-20160 /etc/systemd/system/tikv-20160.service /ssd/tidb-data/tikv-20160]
Destroying instance 172.29.3.189
Deleting paths on 172.29.3.189: /ssd/tidb-data/tikv-20160 /ssd/tidb-deploy/tikv-20160/log /ssd/tidb-deploy/tikv-20160 /etc/systemd/system/tikv-20160.service
Destroy 172.29.3.189 success
- Destroy tikv paths: [/ssd/tidb-deploy/tikv-20160 /etc/systemd/system/tikv-20160.service /ssd/tidb-data/tikv-20160 /ssd/tidb-deploy/tikv-20160/log]
Destroying instance 172.29.3.190
Deleting paths on 172.29.3.190: /ssd/tidb-data/tikv-20160 /ssd/tidb-deploy/tikv-20160/log /ssd/tidb-deploy/tikv-20160 /etc/systemd/system/tikv-20160.service
Destroy 172.29.3.190 success
- Destroy tikv paths: [/ssd/tidb-data/tikv-20160 /ssd/tidb-deploy/tikv-20160/log /ssd/tidb-deploy/tikv-20160 /etc/systemd/system/tikv-20160.service]
Destroying monitored 172.29.3.188
Destroying monitored
        Destroying instance 172.29.3.188
Destroy monitored on 172.29.3.188 success
Destroying monitored 172.29.3.189
Destroying monitored
        Destroying instance 172.29.3.189
Destroy monitored on 172.29.3.189 success
Destroying monitored 172.29.3.190
Destroying monitored
        Destroying instance 172.29.3.190
Destroy monitored on 172.29.3.190 success
Destroying component pd
Destroying instance 172.29.3.187
Deleting paths on 172.29.3.187: /ssd/tidb-data/pd-2379 /ssd/tidb-deploy/pd-2379/log /ssd/tidb-deploy/pd-2379 /etc/systemd/system/pd-2379.service
Destroy 172.29.3.187 success
- Destroy pd paths: [/etc/systemd/system/pd-2379.service /ssd/tidb-data/pd-2379 /ssd/tidb-deploy/pd-2379/log /ssd/tidb-deploy/pd-2379]
Destroying monitored 172.29.3.187
Destroying monitored
        Destroying instance 172.29.3.187
Destroy monitored on 172.29.3.187 success
Destroyed cluster `aaa` successfully
Starting component `cluster`: /root/.tiup/components/cluster/v1.1.2/tiup-cluster deploy aaa v4.0.6 ./topo.yml -y
+ Generate SSH keys ... Done
+ Download TiDB components
  - Download pd:v4.0.6 (linux/amd64) ... Done
  - Download tikv:v4.0.6 (linux/amd64) ... Done
  - Download tidb:v4.0.6 (linux/amd64) ... Done
  - Download prometheus:v4.0.6 (linux/amd64) ... Done
  - Download grafana:v4.0.6 (linux/amd64) ... Done
  - Download node_exporter:v0.17.0 (linux/amd64) ... Done
  - Download blackbox_exporter:v0.12.0 (linux/amd64) ... Done
+ Initialize target host environments
  - Prepare 172.29.3.187:22 ... Done
  - Prepare 172.29.3.188:22 ... Done
  - Prepare 172.29.3.189:22 ... Done
  - Prepare 172.29.3.190:22 ... Done
+ Copy files
  - Copy pd -> 172.29.3.187 ... Done
  - Copy tikv -> 172.29.3.188 ... Done
  - Copy tikv -> 172.29.3.189 ... Done
  - Copy tikv -> 172.29.3.190 ... Done
  - Copy tidb -> 172.29.3.187 ... Done
  - Copy prometheus -> 172.29.3.187 ... Done
  - Copy grafana -> 172.29.3.187 ... Done
  - Copy node_exporter -> 172.29.3.187 ... Done
  - Copy node_exporter -> 172.29.3.188 ... Done
  - Copy node_exporter -> 172.29.3.189 ... Done
  - Copy node_exporter -> 172.29.3.190 ... Done
  - Copy blackbox_exporter -> 172.29.3.187 ... Done
  - Copy blackbox_exporter -> 172.29.3.188 ... Done
  - Copy blackbox_exporter -> 172.29.3.189 ... Done
  - Copy blackbox_exporter -> 172.29.3.190 ... Done
+ Check status
Deployed cluster `aaa` successfully, you can start the cluster via `tiup cluster start aaa`
Starting component `cluster`: /root/.tiup/components/cluster/v1.1.2/tiup-cluster start aaa
Starting cluster aaa...
+ [ Serial ] - SSHKeySet: privateKey=/root/.tiup/storage/cluster/clusters/aaa/ssh/id_rsa, publicKey=/root/.tiup/storage/cluster/clusters/aaa/ssh/id_rsa.pub
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.188
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.190
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.189
+ [Parallel] - UserSSH: user=tidb, host=172.29.3.187
+ [ Serial ] - StartCluster
Starting component pd
        Starting instance pd 172.29.3.187:2379
        Start pd 172.29.3.187:2379 success
Starting component node_exporter
        Starting instance 172.29.3.187
        Start 172.29.3.187 success
Starting component blackbox_exporter
        Starting instance 172.29.3.187
        Start 172.29.3.187 success
Starting component tikv
        Starting instance tikv 172.29.3.190:20160
        Starting instance tikv 172.29.3.188:20160
        Starting instance tikv 172.29.3.189:20160
        Start tikv 172.29.3.190:20160 success
        Start tikv 172.29.3.189:20160 success
        Start tikv 172.29.3.188:20160 success
Starting component node_exporter
        Starting instance 172.29.3.188
        Start 172.29.3.188 success
Starting component blackbox_exporter
        Starting instance 172.29.3.188
        Start 172.29.3.188 success
Starting component node_exporter
        Starting instance 172.29.3.189
        Start 172.29.3.189 success
Starting component blackbox_exporter
        Starting instance 172.29.3.189
        Start 172.29.3.189 success
Starting component node_exporter
        Starting instance 172.29.3.190
        Start 172.29.3.190 success
Starting component blackbox_exporter
        Starting instance 172.29.3.190
        Start 172.29.3.190 success
Starting component tidb
        Starting instance tidb 172.29.3.187:4000
        Start tidb 172.29.3.187:4000 success
Starting component prometheus
        Starting instance prometheus 172.29.3.187:9090
        Start prometheus 172.29.3.187:9090 success
Starting component grafana
        Starting instance grafana 172.29.3.187:3000
        Start grafana 172.29.3.187:3000 success
+ [ Serial ] - UpdateTopology: cluster=aaa
Started cluster `aaa` successfully
Start sysbench oltp_common --config-file=config prepare --tables=16 --table-size=1000000
Filesystem      Size  Used Avail Use% Mounted on
udev             16G     0   16G   0% /dev
tmpfs           3.2G  736K  3.2G   1% /run
/dev/vda1        40G  4.2G   34G  12% /
tmpfs            16G     0   16G   0% /dev/shm
tmpfs           5.0M     0  5.0M   0% /run/lock
tmpfs            16G     0   16G   0% /sys/fs/cgroup
tmpfs           3.2G     0  3.2G   0% /run/user/0
/dev/vdb        879G  683M  834G   1% /ssd
tmpfs           3.2G     0  3.2G   0% /run/user/1000
Mon 21 Sep 2020 09:26:05 AM CST
sysbench 1.0.20 (using bundled LuaJIT 2.1.0-beta2)

Initializing worker threads...

Creating table 'sbtest4'...
Creating table 'sbtest15'...
Creating table 'sbtest2'...
Creating table 'sbtest8'...
Creating table 'sbtest7'...
Creating table 'sbtest14'...
Creating table 'sbtest3'...
Creating table 'sbtest10'...
Creating table 'sbtest9'...
Creating table 'sbtest12'...
Creating table 'sbtest16'...
Creating table 'sbtest6'...
Creating table 'sbtest13'...
Creating table 'sbtest5'...
Creating table 'sbtest11'...
Creating table 'sbtest1'...
Inserting 1000000 records into 'sbtest15'
Inserting 1000000 records into 'sbtest5'
Inserting 1000000 records into 'sbtest7'
Inserting 1000000 records into 'sbtest14'
Inserting 1000000 records into 'sbtest11'
Inserting 1000000 records into 'sbtest9'
Inserting 1000000 records into 'sbtest1'
Inserting 1000000 records into 'sbtest10'
Inserting 1000000 records into 'sbtest8'
Inserting 1000000 records into 'sbtest6'
Inserting 1000000 records into 'sbtest2'
Inserting 1000000 records into 'sbtest16'
Inserting 1000000 records into 'sbtest13'
Inserting 1000000 records into 'sbtest4'
Inserting 1000000 records into 'sbtest3'
Inserting 1000000 records into 'sbtest12'
Creating a secondary index on 'sbtest4'...
Creating a secondary index on 'sbtest8'...
Creating a secondary index on 'sbtest10'...
Creating a secondary index on 'sbtest7'...
Creating a secondary index on 'sbtest15'...
Creating a secondary index on 'sbtest9'...
Creating a secondary index on 'sbtest5'...
Creating a secondary index on 'sbtest1'...
Creating a secondary index on 'sbtest16'...
Creating a secondary index on 'sbtest6'...
Creating a secondary index on 'sbtest11'...
Creating a secondary index on 'sbtest2'...
Creating a secondary index on 'sbtest3'...
Creating a secondary index on 'sbtest12'...
Creating a secondary index on 'sbtest14'...
Creating a secondary index on 'sbtest13'...

real    9m52.206s
user    0m32.793s
sys     0m1.815s
Mon 21 Sep 2020 09:35:57 AM CST
Filesystem      Size  Used Avail Use% Mounted on
udev             16G     0   16G   0% /dev
tmpfs           3.2G  728K  3.2G   1% /run
/dev/vda1        40G  4.2G   34G  12% /
tmpfs            16G     0   16G   0% /dev/shm
tmpfs           5.0M     0  5.0M   0% /run/lock
tmpfs            16G     0   16G   0% /sys/fs/cgroup
tmpfs           3.2G     0  3.2G   0% /run/user/0
/dev/vdb        879G  711M  834G   1% /ssd
Prepare done
Start sysbench oltp_update_index --config-file=config --threads=900 run --tables=16 --table-size=1000000
Filesystem      Size  Used Avail Use% Mounted on
udev             16G     0   16G   0% /dev
tmpfs           3.2G  728K  3.2G   1% /run
/dev/vda1        40G  4.2G   34G  12% /
tmpfs            16G     0   16G   0% /dev/shm
tmpfs           5.0M     0  5.0M   0% /run/lock
tmpfs            16G     0   16G   0% /sys/fs/cgroup
tmpfs           3.2G     0  3.2G   0% /run/user/0
/dev/vdb        879G  711M  834G   1% /ssd
Mon 21 Sep 2020 09:35:57 AM CST
sysbench 1.0.20 (using bundled LuaJIT 2.1.0-beta2)

Running the test with following options:
Number of threads: 900
Report intermediate results every 10 second(s)
Initializing random number generator from current time

Initializing worker threads...

Threads started!

[ 10s ] thds: 900 tps: 13774.02 qps: 13774.02 (r/w/o: 0.00/13769.23/4.80) lat (ms,95%): 132.49 err/s: 0.00 reconn/s: 0.00
[ 20s ] thds: 900 tps: 14866.57 qps: 14866.57 (r/w/o: 0.00/14860.77/5.80) lat (ms,95%): 130.13 err/s: 0.00 reconn/s: 0.00
[ 30s ] thds: 900 tps: 14968.73 qps: 14968.73 (r/w/o: 0.00/14962.33/6.40) lat (ms,95%): 130.13 err/s: 0.00 reconn/s: 0.00
[ 40s ] thds: 900 tps: 14974.44 qps: 14974.44 (r/w/o: 0.00/14968.84/5.60) lat (ms,95%): 130.13 err/s: 0.00 reconn/s: 0.00
[ 50s ] thds: 900 tps: 14744.86 qps: 14744.86 (r/w/o: 0.00/14738.36/6.50) lat (ms,95%): 130.13 err/s: 0.00 reconn/s: 0.00
[ 60s ] thds: 900 tps: 14882.39 qps: 14882.39 (r/w/o: 0.00/14875.39/7.00) lat (ms,95%): 132.49 err/s: 0.00 reconn/s: 0.00
[ 70s ] thds: 900 tps: 14426.50 qps: 14426.50 (r/w/o: 0.00/14420.10/6.40) lat (ms,95%): 134.90 err/s: 0.00 reconn/s: 0.00
[ 80s ] thds: 900 tps: 14817.20 qps: 14817.20 (r/w/o: 0.00/14812.00/5.20) lat (ms,95%): 132.49 err/s: 0.00 reconn/s: 0.00
[ 90s ] thds: 900 tps: 14699.38 qps: 14699.38 (r/w/o: 0.00/14693.18/6.20) lat (ms,95%): 132.49 err/s: 0.00 reconn/s: 0.00
[ 100s ] thds: 900 tps: 14736.37 qps: 14736.27 (r/w/o: 0.00/14730.17/6.10) lat (ms,95%): 132.49 err/s: 0.00 reconn/s: 0.00
[ 110s ] thds: 900 tps: 14465.94 qps: 14466.04 (r/w/o: 0.00/14460.24/5.80) lat (ms,95%): 134.90 err/s: 0.00 reconn/s: 0.00
[ 120s ] thds: 900 tps: 14541.82 qps: 14541.82 (r/w/o: 0.00/14536.82/5.00) lat (ms,95%): 132.49 err/s: 0.00 reconn/s: 0.00
[ 130s ] thds: 900 tps: 14529.53 qps: 14529.53 (r/w/o: 0.00/14523.03/6.50) lat (ms,95%): 134.90 err/s: 0.00 reconn/s: 0.00
[ 140s ] thds: 900 tps: 14301.69 qps: 14301.69 (r/w/o: 0.00/14296.89/4.80) lat (ms,95%): 132.49 err/s: 0.00 reconn/s: 0.00
[ 150s ] thds: 900 tps: 14507.21 qps: 14507.21 (r/w/o: 0.00/14500.11/7.10) lat (ms,95%): 134.90 err/s: 0.00 reconn/s: 0.00
SQL statistics:
    queries performed:
        read:                            0
        write:                           2192405
        other:                           892
        total:                           2193297
    transactions:                        2193297 (14602.80 per sec.)
    queries:                             2193297 (14602.80 per sec.)
    ignored errors:                      0      (0.00 per sec.)
    reconnects:                          0      (0.00 per sec.)

General statistics:
    total time:                          150.1954s
    total number of events:              2193297

Latency (ms):
         min:                                    1.52
         avg:                                   61.57
         max:                                 3340.25
         95th percentile:                      132.49
         sum:                            135043163.06

Threads fairness:
    events (avg/stddev):           2436.9967/13.60
    execution time (avg/stddev):   150.0480/0.05

real    2m31.453s
user    0m35.892s
sys     0m43.086s
Mon 21 Sep 2020 09:38:29 AM CST
Filesystem      Size  Used Avail Use% Mounted on
udev             16G     0   16G   0% /dev
tmpfs           3.2G  728K  3.2G   1% /run
/dev/vda1        40G  4.2G   34G  12% /
tmpfs            16G     0   16G   0% /dev/shm
tmpfs           5.0M     0  5.0M   0% /run/lock
tmpfs            16G     0   16G   0% /sys/fs/cgroup
tmpfs           3.2G     0  3.2G   0% /run/user/0
/dev/vdb        879G  730M  834G   1% /ssd
Run done
root@node3:~#
YKG commented 4 years ago

t1r3-non-nvme.log t1r3-nvme.log t1r10k-nvme.log t1r100k-nvme.log t1r1000-nvme.log