Status: Open — tfzxyinhao opened this issue 10 years ago
OK,我晚上回去看下,复现的代码最好贴下
Client.js
// FDFS wraps a FastDFS client and batch-uploads local files.
function FDFS() {
  this.fdClient = new FdfsClient(config.fdfsConfig);
}

/**
 * Upload every file in fileArr.
 *
 * @param {Array<Array<string>>} fileArr - array of [localPath] tuples; on a
 *        successful upload the remote path is appended in place, so each
 *        tuple becomes [localPath, remotePath].
 * @param {Function} cb - cb(err); called once all uploads finish, or with
 *        the first error encountered.
 *
 * Fix: the original used async.each, which starts ALL uploads at once. With
 * large batches (~180 files, as reported) that exhausts file descriptors /
 * tracker sockets ("too many open files") and crashes the process.
 * eachLimit caps in-flight uploads while keeping the exact same interface.
 */
FDFS.prototype.upload = function (fileArr, cb) {
  var self = this;
  var CONCURRENCY = 5; // keep well below the process fd/socket limit

  async.eachLimit(fileArr, CONCURRENCY, function (item, callback) {
    self.fdClient.upload(item[0], {}, function (err, path) {
      logger.info("upload %s to %s,%j", item[0], path, err);
      if (!err) {
        // Only record the remote path for successful uploads; the original
        // pushed `undefined` onto the tuple when the upload failed.
        item.push(path);
      }
      callback(err);
    });
  }, function (err) {
    cb(err);
  });
};
以下是从项目中提取出来的,经过精简的
// NOTE(review): snippet pasted verbatim from the issue report — not runnable
// as-is. The "..." before ["/home/tmp/185.jpg"] is an elision placeholder for
// files 6..184 (not a JavaScript spread), and doUpload does not match the
// upload method defined on FDFS above — presumably the same method; verify
// against the reporter's actual code.
var client = new FDFS; var files = [["/home/tmp/1.jpg"],["/home/tmp/2.jpg"],["/home/tmp/3.jpg"],["/home/tmp/3.jpg"],["/home/tmp/4.jpg"],["/home/tmp/5.jpg"],...["/home/tmp/185.jpg"]] if(files.length > 0) { client .doUpload(files, function (err) { if(err) logger.error("upload error:%j",err); }); }
对于批量上传文件,能否提供批量上传的功能,每次上传完成不关闭和tracker之间的socket,直接上传下一个文件,套接字的连接和关闭开销还是很大的,批量上传是很常见的应用场景,对于大规模的文件存储,文件的上传必然是批量的
当时本来想加连接池功能来着,因为时间有点紧迫加上我这边没有频繁的批量上传文件的场景,所以先没实现了。我找时间实现下。
另外is-type-of,这不是有么。
重现了吗
我本地测试会报跟你不一样的错,报打开文件数过多,我看下怎么解决吧,你现在这样是并发的去传,你着急的话先改成顺序的试试,我找时间再优化下。
另外你提供下你的node版本,最近比较忙,找时间再仔细调
v0.10.15
你好作者,我的node v0.10.26版本如下测试
// Build a FastDFS client (reporter's reproduction snippet).
var FdfsClient = require('fdfs-client');
var debug = require('debug')('fdfs');

var clientOptions = {
  // tracker servers
  trackers: [
    { host: '192.168.1.103', port: 22122 }
  ],
  // request timeout; defaults to 10s
  timeout: 10000,
  // extension used when none can be derived from the file name
  defaultExt: 'txt',
  // charset defaults to utf8
  charset: 'utf8',
  // route the module's log output through debug()
  logger: { log: debug }
};

var fdfs = new FdfsClient(clientOptions);
// Upload a local JPEG; fileId comes back as group + '/' + filename.
var option = { ext: 'jpg' };

fdfs.upload('./10005515_3.JPG', option, function (err, fileId) {
  console.log(err);
  console.log(fileId);
});
报错如下,请问这个util是用的哪里的util啊。 D:\soft\nodejs\aamUpLife\node_modules\fdfs-client\lib\fdfs.js:145 if (util.isFunction(options)) {//这里报错 has no method 'isFunction' ^ TypeError: Object #&lt;Object&gt; has no method 'isFunction'
@guhaizhous 你重新安装下最新版本的模块,因为最早我是在node 0.11版本上测的,没注意。。,我后来改成都用is-type-of
模块了。
is-type-of 里面也是没有isFunction这个方法的。
最新版本没有传到npm上
node v0.10.29报同样的错误,if (util.isFunction(options)) {//这里报错 has no method 'isFunction' 把util替换成is-type-of,然后修改相关引用的地方, 把util.isFunction 替换成is.function,还有util.isString替换成is.string,util.isBuffer替换成is.buffer 完成后测试通过!!!
@luoyoubao 我更新了下版本号,以前不知道为什么没publish上去,你重新install再看看是否OK
@chenboxiang 更新0.5.4的版本测试过了,已经可以了,谢谢大牛^_^
nice O(∩_∩)O~
用于生产环境的话,最好能支持批量上传,要不然不断连接关闭也是相当耗费资源的,性能跟不上来啊,大牛继续给力啊 [2014-12-09 15:19:22.995] [DEBUG] lookapp - The file length:5 [2014-12-09 15:19:23.116] [DEBUG] console - get a upload storage server from tracker server: [192.168.0.13:22122] [2014-12-09 15:19:23.116] [DEBUG] console - connect to tracker server [192.168.0.13:22122] [2014-12-09 15:19:23.119] [DEBUG] console - get a upload storage server from tracker server: [192.168.0.13:22122] [2014-12-09 15:19:23.120] [DEBUG] console - connect to tracker server [192.168.0.13:22122] [2014-12-09 15:19:23.121] [DEBUG] console - get a upload storage server from tracker server: [192.168.0.13:22122] [2014-12-09 15:19:23.121] [DEBUG] console - connect to tracker server [192.168.0.13:22122] [2014-12-09 15:19:23.122] [DEBUG] console - get a upload storage server from tracker server: [192.168.0.13:22122] [2014-12-09 15:19:23.122] [DEBUG] console - connect to tracker server [192.168.0.13:22122] [2014-12-09 15:19:23.125] [DEBUG] console - get a upload storage server from tracker server: [192.168.0.13:22122] [2014-12-09 15:19:23.125] [DEBUG] console - connect to tracker server [192.168.0.13:22122] [2014-12-09 15:19:23.161] [DEBUG] console - tracker server [192.168.0.13:22122] is connected [2014-12-09 15:19:23.162] [DEBUG] console - send header to tracker server [192.168.0.13:22122] [2014-12-09 15:19:23.163] [DEBUG] console - tracker server [192.168.0.13:22122] is connected [2014-12-09 15:19:23.163] [DEBUG] console - send header to tracker server [192.168.0.13:22122] [2014-12-09 15:19:23.163] [DEBUG] console - tracker server [192.168.0.13:22122] is connected [2014-12-09 15:19:23.163] [DEBUG] console - send header to tracker server [192.168.0.13:22122] [2014-12-09 15:19:23.163] [DEBUG] console - tracker server [192.168.0.13:22122] is connected [2014-12-09 15:19:23.163] [DEBUG] console - send header to tracker server [192.168.0.13:22122] [2014-12-09 15:19:23.164] [DEBUG] console - tracker server [192.168.0.13:22122] is connected [2014-12-09 
15:19:23.164] [DEBUG] console - send header to tracker server [192.168.0.13:22122] [2014-12-09 15:19:23.165] [DEBUG] console - receive server packet header: {"status":0,"bodyLength":40} [2014-12-09 15:19:23.165] [DEBUG] console - get store storage server info: {"group":"xiupinjie01","host":"192.168.0.13","port":23000,"storePathIndex":0,"timeout":10000,"charset":"utf8"} [2014-12-09 15:19:23.166] [DEBUG] console - connect to storage server [192.168.0.13:23000]. [2014-12-09 15:19:23.167] [DEBUG] console - receive server packet header: {"status":0,"bodyLength":40} [2014-12-09 15:19:23.167] [DEBUG] console - get store storage server info: {"group":"xiupinjie01","host":"192.168.0.13","port":23000,"storePathIndex":0,"timeout":10000,"charset":"utf8"} [2014-12-09 15:19:23.167] [DEBUG] console - connect to storage server [192.168.0.13:23000]. [2014-12-09 15:19:23.169] [DEBUG] console - receive server packet header: {"status":0,"bodyLength":40} [2014-12-09 15:19:23.169] [DEBUG] console - get store storage server info: {"group":"xiupinjie01","host":"192.168.0.13","port":23000,"storePathIndex":0,"timeout":10000,"charset":"utf8"} [2014-12-09 15:19:23.169] [DEBUG] console - connect to storage server [192.168.0.13:23000]. [2014-12-09 15:19:23.170] [DEBUG] console - receive server packet header: {"status":0,"bodyLength":40} [2014-12-09 15:19:23.170] [DEBUG] console - get store storage server info: {"group":"xiupinjie01","host":"192.168.0.13","port":23000,"storePathIndex":0,"timeout":10000,"charset":"utf8"} [2014-12-09 15:19:23.170] [DEBUG] console - connect to storage server [192.168.0.13:23000]. [2014-12-09 15:19:23.171] [DEBUG] console - receive server packet header: {"status":0,"bodyLength":40} [2014-12-09 15:19:23.171] [DEBUG] console - get store storage server info: {"group":"xiupinjie01","host":"192.168.0.13","port":23000,"storePathIndex":0,"timeout":10000,"charset":"utf8"}
嗯,我先支持下批量吧,开始就有人提过了,一直忙,没顾上改。
恩,请教楼主一个问题,我们项目中现在需要存储大量的图片文件,而且需要生成缩略图,目前考虑用fastdfs做文件存储,主要思路有两种: 一种是上传时实时压缩或裁剪; 另外一种是下载时根据原图实时生成缩略图; 实时裁剪的目前来看主要是裁剪和上传比较耗费性能,测试了下使用fdfs-client裁剪加上上传5张图片大概用了1-2秒左右的时间,再加上网络上传时间,估计如果客户端上传3-5张图片,1M左右需要平均3-5秒,这样加起来服务器响应时间大概5-10秒左右了,当然这个还没考虑到并发场景; 另外一种方案考虑请求时实时压缩,如nginx+lua_nginx+GraphicsMagick,但是这种方案对服务器压力较大; 不知道楼主是否了解一些类似的解决方案!因为使用NODE不久,如果方便是否可以加QQ请教下! QQ: 104918751
裁剪完了直接存储到文件系统,每次上来先从文件系统读,有就直接获取,没就裁剪完了,把图片存储下来,这样就不需要重复裁剪了。
下载文件失败错误代码: Error: receive packet errno is: 2 请问如何解决,新手求指教
// Download _file_id into a local file, settling `deferred` with the outcome.
var ws = fs.createWriteStream("e:/app.rar");

this._fdfs.download(_file_id, ws, function (err) {
  // Guard clause: reject and bail on failure, resolve otherwise.
  if (err) {
    console.log(err);
    deferred.reject(err.toString());
    return;
  }
  deferred.resolve(null);
});
由于我自己封装了一个logger(log4js),还有我使用npm安装is-type-of这个的时候提示不存在 所以我对你的fdfs客户端做了一些修改 1.替换logger为我的logger 2.使用isstream代替is-type-of
最近在上传大概180个的图片的时候,上传到一半数量的图片就会进程崩溃,每次必现 相关代码
callback定义
cleanup定义
closeSocket定义
以下是进程崩溃时的调用栈