mingyun / mingyun.github.io

GitHub homepage

m3u8 to mp4 #89

Open mingyun opened 7 years ago

mingyun commented 7 years ago
#!/usr/bin/python
# coding: utf-8

import sys
import subprocess

import requests, urllib, time
from urlparse import urlparse

# playback URL comes from the command line: python test.py xxx.m3u8
# example URLs:
#url = 'http://cnhlsvod01.t.vhall.com/vhalllive/363200352/fulllist.m3u8'
#url = 'http://cnhlsvodhls01.e.vhall.com//vhallrecord/402798753/20170315113114/record.m3u8'
if len(sys.argv) < 2 or not sys.argv[1]:
    print('error: missing m3u8 url')
    sys.exit(0)
url = sys.argv[1]

"""
#EXTM3U
#EXT-X-VERSION:3
#EXT-X-TARGETDURATION:10
#EXT-X-MEDIA-SEQUENCE:0
#EXTINF:4.010,
/vhallhls/36/32/00/363200352/363200352/20161009190045/livestream000000.ts
#EXTINF:4.017,
/vhallhls/36/32/00/363200352/363200352/20161009190045/livestream000001.ts
#EXTINF:0.749,
/vhallhls/36/32/00/363200352/363200352/20161009190045/livestream000002.ts
#EXT-X-DISCONTINUITY
#EXTINF:4.010,
/vhallhls/36/32/00/363200352/363200352/20161009190101/livestream000000.ts
#EXTINF:4.017,
/vhallhls/36/32/00/363200352/363200352/20161009190101/livestream000001.ts
#EXTINF:2.089,
/vhallhls/36/32/00/363200352/363200352/20161009190101/livestream000002.ts
#EXT-X-ENDLIST
"""
print url
res = requests.get(url)
r = res.content
hosturl = urlparse(url).scheme + '://' + urlparse(url).hostname
print hosturl
"""
['/vhallhls/40/27/98/402798753/402798753/20170315113037/livestream000000.ts', '/
vhallhls/40/27/98/402798753/402798753/20170315113037/livestream000001.ts', '/vha
llhls/40/27/98/402798753/402798753/20170315113037/livestream000002.ts', '/vhallh
ls/40/27/98/402798753/402798753/20170315113037/livestream000003.ts', '/vhallhls/
40/27/98/402798753/402798753/20170315113037/livestream000004.ts', '/vhallhls/40/
27/98/402798753/402798753/20170315113037/livestream000005.ts', '/vhallhls/40/27/
98/402798753/402798753/20170315113037/livestream000006.ts']
['http://cnhlsvodhls01.e.vhall.com/vhallhls/40/27/98/402798753/402798753/2017031
5113037/livestream000000.ts', 'http://cnhlsvodhls01.e.vhall.com/vhallhls/40/27/9
8/402798753/402798753/20170315113037/livestream000001.ts', 'http://cnhlsvodhls01
.e.vhall.com/vhallhls/40/27/98/402798753/402798753/20170315113037/livestream0000
02.ts', 'http://cnhlsvodhls01.e.vhall.com/vhallhls/40/27/98/402798753/402798753/
20170315113037/livestream000003.ts', 'http://cnhlsvodhls01.e.vhall.com/vhallhls/
40/27/98/402798753/402798753/20170315113037/livestream000004.ts', 'http://cnhlsv
odhls01.e.vhall.com/vhallhls/40/27/98/402798753/402798753/20170315113037/livestr
eam000005.ts', 'http://cnhlsvodhls01.e.vhall.com/vhallhls/40/27/98/402798753/402
798753/20170315113037/livestream000006.ts']
"""
# keep only the .ts segment lines from the playlist
arr = filter(lambda i: '.ts' in i, r.split('\n'))
print arr
urllist = map(lambda i: hosturl + i, arr)
print urllist
# save a local copy of the playlist, rewriting '/' so the segment paths match the local file names
m3u8file = "movie.m3u8"
with open(m3u8file, 'w') as f:
    f.write(r.replace('/', '-'))
# download the .ts segments locally
for i in urllist:
    urllib.urlretrieve(i, urlparse(i).path.replace('/', '-'))
    time.sleep(1)
#select date_format(created_at,'%Y-%m-%d') as date,count(*) from demands where date_format(created_at,'%Y-%m-%d') in ('2016-04-18','2016-04-08') group by date order by id desc limit 5;
def transcode(m3u8file):
    # open the local m3u8 playlist
    try:
        with open(m3u8file) as file_:
            m3u8lines = file_.readlines()
    except Exception:
        return

    # concatenate all .ts segments into a single .ts file
    tsfile = "m3u8.ts"  # the concatenated file can already be played directly in VLC
    destfp = open(tsfile, 'wb+')
    for m3u8line in m3u8lines:
        m3u8line = m3u8line.strip("\n")
        if m3u8line.endswith(".ts"):
            srcfp = open(m3u8line, 'rb')
            buf = srcfp.read()
            srcfp.close()
            destfp.write(buf)
    if destfp:
        destfp.close()

    # transcode the concatenated .ts into .mp4 (requires ffmpeg to be installed)
    mp4file = tsfile.replace(".ts", ".mp4")
    # ffmpeg -i in.m3u8 -acodec copy -bsf:a aac_adtstoasc -vcodec copy out.mp4
    # # you should download the files in m3u8 file first
    #ffmpeg -i the.file.m3u8 -acodec copy -vcodec copy  -y -loglevel info -bsf:a aac_adtstoasc -f mp4 your-mp4-file.mp4

    # First 10 Minutes
    #ffmpeg -i VIDEO_SOURCE.mp4 -vcodec copy -acodec copy -ss 0 -t 00:10:00  VIDEO_PART_1.mpg
    # Second 10 Minutes
    #ffmpeg -i VIDEO_SOURCE.mp4 -vcodec copy -acodec copy -ss 00:10:00 -t 00:20:00  VIDEO_PART_2.mpg
    # Rest after the first 20 Minutes
    #ffmpeg -i VIDEO_SOURCE.mp4 -vcodec copy -acodec copy -ss 00:20:00  VIDEO_PART_3.mpg
    #ffmpeg -i "http://host/folder/file.m3u8" -bsf:a aac_adtstoasc -vcodec copy -c copy -crf 50 file.mp4
    # mp4 to m3u8: http://blog.csdn.net/jookers/article/details/21694957
    #ffmpeg -i input.mp4 -c:v libx264 -c:a aac -strict -2 -f hls output.m3u8
    #ffmpeg -i input0.mp4 -vn input0.mp3 -c:v libx264 -c:a aac -strict -2 -f hls -hls_list_size 0 output.m3u8
    ffts2mp4 = "ffmpeg -v error -y -analyzeduration 10000000 -i %s -vcodec copy -bsf:a aac_adtstoasc %s" % (tsfile, mp4file)
    proc = subprocess.Popen(ffts2mp4, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=0, shell=True)
    return proc.communicate()

transcode(m3u8file)
mingyun commented 7 years ago

file:///D:/ lets the browser list local files. "A leading programming training platform."

    public function handle()
    {
        $this->syncAvatar();
    }

    public function syncAvatar($retry = 0){
        $file = Common::curlGetRequest($this->avatar);
        if(!$file){
            if($retry < $this->retry){
                $retry = $retry + 1;
                sleep($this->failSleepTime);// fetch failed; sleep a moment before retrying
                return $this->syncAvatar($retry);
            }else{
                \Log::info('sync avatar fail', ['user_id'=>$this->userId, 'avatar'=>$this->avatar]);
                return false;
            }
        }
// convert the model to a plain stdClass before pushing it onto the queue,
// so that deserializing the job in the queue worker will not trigger __wakeup
    Queue::push(new WebinarTypeChange(Common::modelToSimpleObj($webinar)), null, 'webinar_type_change');
public static function modelToSimpleObj($object)
    {
        if (!is_object($object)) {
            return $object;
        }

        $data = json_decode(json_encode($object), true);
        return (object) $data;
    }

>>> $u=user::find(1)
=> <App\Models\User #0000000015e0be27000000000104efae> {
>>> (object)json_decode(json_encode($u),1)
=> <stdClass #0000000015e0be30000000000104efae> {
mingyun commented 7 years ago

Concurrent MySQL queries from PHP; MySQL optimizations I made on a project; Nginx tutorial; WeChat bot; testing with pytest; [MySQL SQL optimization series] IN vs. range queries; city database SQL

class UUID
{
    /**
     * Generate a UUID
     * @return string
     */
    static public function CreateId()
    {
        if (function_exists('com_create_guid')) {
            return trim(com_create_guid(), '{}');
        } else {
            mt_srand((double)microtime() * 10000);
            $charid = strtoupper(md5(uniqid(rand(), TRUE)));
            $hyphen = chr(45);
            $uuid   = substr($charid, 0, 8) . $hyphen
                . substr($charid, 8, 4) . $hyphen
                . substr($charid, 12, 4) . $hyphen
                . substr($charid, 16, 4) . $hyphen
                . substr($charid, 20, 12);

            return $uuid;
        }
    }
}
SQL: escape % and _ before using user input in a LIKE pattern
$subject = addcslashes(trim($subject),'%_');
select *from webinars where user_id=14 and `subject` like '%\%%';

>>> numtostr(1.2313223123423E+017)
=> "123132231234230000"
function NumToStr($num){
    if (stripos($num, 'e') === false) return $num;
    // the value came back in scientific notation; rebuild the plain digit string
    $num = trim(preg_replace('/[=\'"]/', '', $num, 1), '"');
    $result = "";
    while ($num > 0){
        $v = $num - floor($num / 10) * 10;
        $num = floor($num / 10);
        $result = $v . $result;
    }
    return $result;
}
 $data = array(
        array( 'row_1_col_1', 'row_1_col_2', 'row_1_col_3' ),
        array( 'row_2_col_1', 'row_2_col_2', 'row_2_col_3' ),
        array( 'row_3_col_1', 'row_3_col_2', 'row_3_col_3' ),
    );
$filename = "example";

    header("Content-type: text/csv");
    header("Content-Disposition: attachment; filename={$filename}.csv");
    header("Pragma: no-cache");
    header("Expires: 0");

outputCSV($data);
 //http://phpff.com/667.html
function outputCSV($data) {
    $outputBuffer = fopen("php://output", 'w');
    foreach ($data as $val) {
        foreach ($val as $key => $val2) {
            // Excel expects GBK-encoded CSV; without this conversion the output is garbled
            $val[$key] = iconv('utf-8', 'gbk', $val2);
        }
        fputcsv($outputBuffer, $val);
    }
    fclose($outputBuffer);
}
ip.cn
ipinfo.io
cip.cc
ifconfig.me
myip.ipip.net
mingyun commented 7 years ago

http://videojj.com/ https://git.oschina.net/xujian_jason/wp-jief-videojj  Add a fixed-value column to a result set: SELECT T.*,100 as c FROM tab1 T; http://chartmage.com/index.html http://www.bttit.com/torrent/%E5%8D%A7%E8%99%8E%E8%97%8F%E9%BE%99.html http://phpff.com/1076.html  Laravel 5 packages. Bing daily wallpaper: https://github.com/xcss/bing https://github.com/lord63/wonderful_bing  MySQL error "could not be resolved: .."

By default MySQL does a reverse DNS lookup on every client address and then checks whether that host is authorized. To skip the lookup, add:
skip-host-cache
skip-name-resolve
To diagnose, enable the general query log, set log_warnings = 2 in the error-log configuration, and check:
cat /var/log/mysql/error.err | egrep '[aA]ccess denied'
http://blog.csdn.net/orangleliu/article/details/54631289 
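A minimal sketch of where those two options go, assuming the config file is /etc/my.cnf and the service is named mysqld (both assumptions, adjust to the actual setup):

# sketch only: append the options to the [mysqld] section and restart
cat >> /etc/my.cnf <<'EOF'
[mysqld]
skip-host-cache
skip-name-resolve
EOF
service mysqld restart
# afterwards, confirm the reverse-DNS warnings stop appearing
egrep -i 'could not be resolved|access denied' /var/log/mysql/error.err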
To migrate an existing table to partitions: first create the partitioned table, dump the original table's data, rename the new table to the original name, load the data, and finally build the ordinary indexes.
mysqldump -u dbname -p --no-create-info dbname apdailysts  > apdailysts.sql
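A rough sketch of those steps; the column names, partition key and partition count below are made-up placeholders, not the real apdailysts schema:

# dump the data only (no CREATE TABLE) from the original table
mysqldump -u root -p --no-create-info dbname apdailysts > apdailysts.sql
# create the partitioned table and swap the names
mysql -u root -p dbname <<'EOF'
CREATE TABLE apdailysts_part (
  id        INT UNSIGNED NOT NULL,
  stat_date DATE NOT NULL,
  pv        INT UNSIGNED NOT NULL DEFAULT 0,
  PRIMARY KEY (id, stat_date)
) ENGINE=InnoDB DEFAULT CHARSET=utf8
  PARTITION BY HASH (TO_DAYS(stat_date)) PARTITIONS 16;
RENAME TABLE apdailysts TO apdailysts_old, apdailysts_part TO apdailysts;
EOF
# reload the data under the original name (now partitioned), then rebuild ordinary indexes last
mysql -u root -p dbname < apdailysts.sql
mysql -u root -p dbname -e "ALTER TABLE apdailysts ADD INDEX idx_pv (pv);"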
SHOW VARIABLES LIKE 'log_bin';   -- check whether binary logging is enabled
SHOW VARIABLES LIKE 'datadir';   -- locate the directory that holds the binlog files
mysqlbinlog --start-datetime="2017-01-09 17:50:00" --stop-datetime="2017-01-09 18:00:00" bin.000025
shell> mysqlbinlog --start-datetime="2017-01-09 17:55:00" --stop-datetime="2017-01-09 18:00:00" --base64-output=decode-rows --verbose bin.000025
--start-datetime     start time
--stop-datetime      end time
--database=resource  restrict output to the given database
--result-file        write the output to a file
--base64-output=decode-rows
--verbose
Find the resource database's records between positions 20 and 2000 and write them to a file:

# mysqlbinlog --start-position="20" --stop-position="2000" --database=resource mysql-bin.407 --result-file=result.sql
Find them and replay them directly into the database:

# mysqlbinlog --start-position="20" --stop-position="2000" --database=resource mysql-bin.407 | mysql -u root
Records can also be selected by time:

# mysqlbinlog --start-datetime="2012-09-20 8:10:00" --stop-datetim="2012-09-25 07:30:00" mysql-bin.407 --result-file=result.sql
Export the resource database's entries starting from position 387426452:

#/usr/local/mysql/bin/mysqlbinlog --start-position=387426452 --set-charset=utf8 --database=resource mysql-bin.407 --result-file=result_resource.sql
(2) Convert the encoding to utf8

#iconv -t utf-8 -f gb2312 -c result_resource.sql > new_result_resource.sql_utf8.sql
(3) Import into the database
Run set names utf8; in the mysql session first.

mysql> set names utf8;
mysql> source /tmp/new_result_resource.sql_utf8.sql
mingyun commented 7 years ago

MySQL daily backup shell script

#!/bin/bash
#back up the MySQL data at 4 a.m. every day  orangleliu
#chmod 700 backup.sh
#crontab -e
#0 4 * * * /home/erya/run/moniter/mysql_backup.sh

user="radius"
password=""
host="localhost"
db_name="radius"

#backup directory 
base_dir="/home/backup"
backup_log="${base_dir}/mysql_backup.log"
backup_path=${base_dir}

#file info
date=$(date +"%Y-%m-%d")
filename=$backup_path/$db_name-$date.sql.gz
umask 177

#back up 
mysqldump --user=$user --password=$password --host=$host $db_name | gzip > $filename
#Delete files older than 10 days
find $backup_path/* -mtime +10 -exec rm {} \;
echo "backup finished at $(date +'%Y-%m-%d %H:%M:%S')" >> "$backup_log"

#Ordered from the most to the least time consumed; if the data range is too small, or the filter matches nothing, a default value is needed
SELECT *   
FROM `test`   
WHERE id >= (SELECT FLOOR( MAX(id) * RAND()) FROM `test` ) and url like "%wifi21%"  
ORDER BY id LIMIT 10;  

select * from `test` where test.dmac="A8:15:4D:B1:7D:76" and test.url like "%wifi21%"  
ORDER BY rand() LIMIT 10;  

SELECT *   
FROM `test` AS t1 JOIN (SELECT ROUND(RAND() * (SELECT MAX(id) FROM `test`)) AS id) AS t2   
WHERE t1.dmac="A8:15:4D:B1:7D:76" and t1.url like "%wifi21%" and  t1.id >= t2.id  
ORDER BY t1.id ASC LIMIT 10;   

SELECT * FROM `test`  
WHERE id >= (SELECT floor(RAND() * (SELECT MAX(id) FROM `test`)))   
AND dmac="A8:15:4D:B1:7D:76" and url like "%wifi21%"  
ORDER BY id LIMIT 10;  

SELECT * FROM `test`   
AS t1 JOIN (SELECT ROUND(RAND() * ((SELECT MAX(id) FROM `test`)-(SELECT MIN(id) FROM `test`))+(   
SELECT MIN(id) FROM `test`)) AS id) AS t2   
WHERE t1.id >= t2.id and t1.dmac="A8:15:4D:B1:7D:76" and t1.url like "%wifi21%"   
ORDER BY t1.id LIMIT 10;  
mingyun commented 7 years ago

An example of rewriting a join query as nested subqueries

Sum every user's traffic usage over a recent period, i.e. the total traffic consumed.
EXPLAIN SELECT sum(s1.acctinputoctets - s2.acctinputoctets), sum(s1.acctoutputoctets-s2.acctoutputoctets) 
FROM count_statusbyhour s1  LEFT JOIN count_statusbyhour s2 ON s1.username = s2.username WHERE  s1.addtime > '2017-01-20 21:55:01'  
AND s1.addtime < '2017-01-20 22:05:01' AND s2.addtime BETWEEN '2017-01-20 20:55:01' AND '2017-01-20 21:25:01' \G

 EXPLAIN
    -> SELECT sum(s3.acctinputoctets - s4.acctinputoctets), sum(s3.acctoutputoctets-s4.acctoutputoctets) FROM
    -> (SELECT acctinputoctets, acctoutputoctets, addtime, username FROM count_statusbyhour s1 WHERE s1.addtime > '2017-01-20 21:55:01' AND s1.addtime < '2017-01-20 22:05:01') s3 LEFT JOIN
    -> (SELECT acctinputoctets, acctoutputoctets, addtime, username FROM count_statusbyhour s2 WHERE s2.addtime BETWEEN '2017-01-20 20:55:01' AND '2017-01-20 21:25:01') s4 ON  s3.username=s4.username \G
set profiling=1;
show profile cpu,block io  for query 1;  -- all of the time is spent on Block_ops_out during the Sending data stage

Vote brushing: http://m.kaolafm.com/event/meizu/list-top.html http://m.kaolafm.com/event/meizu/detail.html?rid=367&from=timeline&isappinstalled=0 http://m.kaolafm.com/event/meizu/

mysqlbinlog --start-position=1 --stop-position=<current pos> --database=database /usr/local/mysql/var/mysql-bin.000001 | grep teams > more.sql  simply exports every statement involving that table, from the start of the log up to the current position. MySQL partitioned tables and how to work with them:

General rules of thumb for splitting a table (reference only): the table is larger than 2 GB, simple queries scan more than 10 million rows, or complex queries scan more than 2 million rows. A partitioned table cannot use foreign-key constraints, and every unique index on the table (including the primary key) must include the partitioning column. All data still lives in one logical table, but the physical storage is split into different files according to the partitioning rule. This is a built-in MySQL feature and requires no application-code changes.
What is MySQL sharding (splitting databases/tables)?
Answer: split the data of a very large database (or table) into several databases (tables) with identical structure, possibly on different MySQL instances or even different physical machines, in order to reduce per-database (per-table) data volume and improve access performance.
Sharding is usually implemented in the application layer; after sharding, developers often have to modify code to keep certain business features working.
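One common routing rule (not stated above, purely an illustration) is to take the sharding key modulo the shard count; a hypothetical sketch with made-up host and table names:

# hypothetical shard routing: 4 identically-structured tables on 4 instances, keyed by user_id % 4
user_id=114002193
shard=$((user_id % 4))
mysql -h "db-shard-${shard}.internal" app \
  -e "SELECT * FROM users_${shard} WHERE user_id = ${user_id};"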
PARTITION BY HASH( YEAR(hired) )
PARTITIONS 4;
The returned integer is taken modulo the number of partitions. Use the EXPLAIN PARTITIONS command to see which partitions a query actually touches.

) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci COMMENT='访问活动用户信息表'
/*!50100 PARTITION BY HASH (webinar_id)
PARTITIONS 64 */;
mysql> desc select *from webinar_user_regs where webinar_id=114002193\G
*************************** 1. row ***************************
           id: 1
  select_type: SIMPLE
        table: webinar_user_regs
         type: ref
possible_keys: idx_webinar_id
          key: idx_webinar_id
      key_len: 4
          ref: const
         rows: 2
        Extra: NULL
1 row in set (0.03 sec)

mysql> desc partitions  select *from webinar_user_regs where webinar_id=114002193\G
*************************** 1. row ***************************
           id: 1
  select_type: SIMPLE
        table: webinar_user_regs
   partitions: p17
         type: ref
possible_keys: idx_webinar_id
          key: idx_webinar_id
      key_len: 4
          ref: const
         rows: 2
        Extra: NULL
1 row in set (0.00 sec)

mysql> show variables like '%part%';
Empty set (0.00 sec)
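The empty result just means this server exposes no '%part%' variables (have_partitioning was deprecated and later removed); a sketch of checking partitioning support through the plugin list instead:

mysql -e "SELECT PLUGIN_NAME, PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_NAME = 'partition';"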

mysql> desc partitions  select *from webinar_user_regs where role_name='host'\G
*************************** 1. row ***************************
           id: 1
  select_type: SIMPLE
        table: webinar_user_regs
   partitions: p0,p1,p2,p3,p4,p5,p6,p7,p8,p9,p10,p11,p12,p13,p14,p15,p16,p17,p18,p19,p20,p21,p22,p23,p24,p25,p26,p27,p28,p29,p30,p31,p32,p33,p34,p35,p36,p37,p38,p39,p40,p41,p42,p43,p44,p45,p46,p47,p48,p49,p50,p51,p52,p53,p54,p55,p56,p57,p58,p59,p60,p61,p62,p63
         type: ref
possible_keys: idx_role_name
          key: idx_role_name
      key_len: 62
          ref: const
         rows: 91
        Extra: Using where
1 row in set (0.00 sec)

Files in the table's data directory (show variables like 'datadir';): t6#p#p0.ibd  t6#p#p1.ibd  t6.frm  t6.par  http://blog.csdn.net/yongchao940/article/details/55266603

mingyun commented 7 years ago
private function connect($retry = 0){
    try{
        $this->_redis = new \Redis();
        $this->_redis->pconnect(env('REDIS_HOST','test-web'), env('REDIS_PORT','23244'), 3);
        $this->_redis->ping();
    }catch (\Exception $e){
        if($retry < $this->_retry){
            usleep($this->_retrySleep);
            $retry = $retry + 1;
            $this->connect($retry);
        }else{
            \Log::error($e->getMessage(), ['class'=>__CLASS__]);
        }
    }
}
use App\Services\AsyncWrite; 
class test extends Model
{
    use AsyncWrite; # async-write trait

Exporting CSV (from 爱程序员)

function arraytocsv($array,$head){
    $s=implode(',', $head).PHP_EOL;
    echo iconv('utf-8','GBK//IGNORE',$s);
    foreach ($array as $key => $value) {
        $s=implode(',', $value).PHP_EOL;
        echo iconv('utf-8','GBK//IGNORE',$s);
    }
}
//https://packagist.org/packages/maatwebsite/excel
function arraytoexecl($array,$head){
    array_unshift($array,$head);
    Excel::create('业务单'.date('YmdHis'), function($excel) use($array){
            $excel->sheet('Sheetname', function($sheet) use($array) {
                $sheet->fromArray($array, null, 'A1', false, false);
            });

        })->export('xlsx');
}
var ws = new WebSocket("wss://echo.websocket.org");

ws.onopen = function(evt) { 
  console.log("Connection open ..."); 
  ws.send("Hello WebSockets!");
};

ws.onmessage = function(evt) {
  console.log( "Received Message: " + evt.data);
  ws.close();
};

ws.onclose = function(evt) {
  console.log("Connection closed.");
};      
Connection open ...
VM1106:9 Received Message: Hello WebSockets!
VM1106:14 Connection closed.

TensorFlow is an open-source powerhouse for AI: http://www.tensorfly.cn  http://www.workerman.net/phpsocket_io

PHPSocket.IO is a PHP implementation of the Socket.IO server. gethostbyname('')  https://github.com/We5ter/Scanners-Box/blob/master/README_CN.md  http://pmd5.com/#  PHP deployment tool: https://deployer.org/

"603E-4234" == "272E-3063" => true

mingyun commented 7 years ago

Redis Cluster ops test report (functional), as a replacement for twemproxy. State after installing Redis:

  1. Redis version 3.2.9;
  2. cluster mode enabled in the config file (cluster-enabled yes);
  3. for the simulation, 6 Redis instances run on one server, 3 masters and 3 slaves. Test procedure:

High availability

  1. After starting all instances, the official .ruby script was used to create a cluster of 3 masters and 3 slaves (6 nodes in total); the cluster's 16384 slots are split evenly across the 3 masters;
  2. Killing one Redis slave process has no effect, since no slots are lost;
  3. Killing one master process: its slave is automatically promoted to master and service continues unaffected;
  4. Killing two or three slaves: no effect;
  5. Killing two masters: their slaves are promoted at the same time and service is unaffected;
  6. Killing a master and its own slave at the same time brings the cluster down and service is affected; in production, masters and their slaves should therefore be deployed crosswise on different servers for better availability. Scalability: simulate adding one master and one slave (see the command sketch after this list)
  7. Start two more Redis instances on different ports; they need no pre-configured master/slave relationship, the cluster manages that later;
  8. Use the .ruby script to add one node to the existing cluster as a master;
  9. Use the .ruby script to move part of the slots to the newly added master; this involves a lot of I/O and slows Redis down considerably, so nodes must only be added outside peak business hours;
  10. Use the .ruby script to add another node to the cluster as a slave; if the corresponding master holds a lot of data, this again causes heavy I/O. Removing nodes is the reverse of adding them: migrate the slots away first, then delete the node. This also involves I/O, so watch the timing. In summary, cluster mode basically meets the high-availability requirements.
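A sketch of the redis-trib.rb commands behind those steps (redis-trib.rb is the official Ruby script shipped with the Redis 3.x source); the ports and the <node-id> placeholders are assumptions for the 6 local instances plus one new master/slave pair:

# create the 3-master / 3-slave cluster; the 16384 slots are split evenly across the masters
redis-trib.rb create --replicas 1 \
  127.0.0.1:7000 127.0.0.1:7001 127.0.0.1:7002 \
  127.0.0.1:7003 127.0.0.1:7004 127.0.0.1:7005
# add a new empty master, then move some slots onto it (heavy I/O, do it off-peak)
redis-trib.rb add-node 127.0.0.1:7006 127.0.0.1:7000
redis-trib.rb reshard 127.0.0.1:7000
# add a slave for the new master
redis-trib.rb add-node --slave --master-id <node-id> 127.0.0.1:7007 127.0.0.1:7000
# removal is the reverse: reshard its slots away first, then delete the node
redis-trib.rb del-node 127.0.0.1:7000 <node-id>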

Still to be verified:

  1. whether every command the business relies on is supported
  2. whether the PHP redis extension (php-pecl-redis) supports cluster mode
  3. on the streaming side, the Python redis client has already been tested and does not currently support Redis Cluster; it needs upgrading
  4. performance under high concurrency; Laravel's predis cannot use mget (in cluster mode)