satazor / js-spark-md5

Lightning fast normal and incremental md5 for javascript

Array buffer allocation failed #52

Open poberwong opened 5 years ago

poberwong commented 5 years ago

Platform

32-bit Windows with 4 GB of memory

Error log

2018-09-29 08:57:04,468: message:{msg=[***]RangeError: Array buffer allocation failed    @ new ArrayBuffer (<anonymous>)    @ new Uint8Array (native)    @ d (https://www.***.com/static/js/main.173fa413.js:34:14944)    @ p.ArrayBuffer.append (https://www.****.com/static/js/main.173fa413.js:34:17224)    @ FileReader.l.onload (https://www.***.com/static/js/main.173fa413.js:11:7385), t=1538182625000, body=, userId=139}
2018-09-29 10:27:26,023: message:{msg=[***]RangeError: Array buffer allocation failed    @ new ArrayBuffer (<anonymous>)    @ typedArrayConstructByLength (<anonymous>)    @ new Uint8Array (native)    @ d (https://www.***.com/static/js/main.173fa413.js:34:14944)    @ p.ArrayBuffer.append (https://www.***.com/static/js/main.173fa413.js:34:17224)    @ FileReader.l.onload (https://www.***.com/static/js/main.173fa413.js:11:7385), t=1538188042322, body=, userId=1013}
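
From the trace, the RangeError comes from the engine itself: new ArrayBuffer / new Uint8Array throw it when the backing memory cannot be reserved, and here it surfaces inside spark.append, which constructs a typed array for the appended chunk. A minimal sketch of the same failure mode, independent of spark-md5 (the size at which it fails depends on the browser and on how much contiguous memory the 32-bit process has left):

try {
  // On a 32-bit browser with a fragmented or nearly exhausted address space,
  // even a moderate allocation like this one can fail.
  let probe = new Uint8Array(512 * 1024 * 1024) // 512 MiB, may or may not fit
  console.log('allocated', probe.byteLength, 'bytes')
} catch (err) {
  console.log(err instanceof RangeError) // true when the allocation fails
  console.log(err.message)               // e.g. "Array buffer allocation failed"
}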

My core code

// `file`, `hashChunkSize` and `blobSlice` come from the enclosing scope
// (blobSlice is typically File.prototype.slice or a vendor-prefixed fallback).
return new Promise((resolve, reject) => {
    let chunkCount = Math.ceil(file.size / hashChunkSize)
    let currentChunk = 0
    let spark = new SparkMD5.ArrayBuffer()   // incremental hash over the whole file
    let sparkArray = []                      // per-chunk hashes
    let fileReader = new FileReader()

    fileReader.onload = function (event) {
      sparkArray.push(SparkMD5.ArrayBuffer.hash(event.target.result))  // hash this chunk on its own
      spark.append(event.target.result)                                // append chunk to the incremental hash
      currentChunk++

      if (currentChunk < chunkCount) {
        loadNext()
      } else {
        resolve({
          md5: spark.end(),
          md5Array: sparkArray
        })
      }
    }

    fileReader.onerror = () => {
      reject(new Error('read file failed'))
    }

    function loadNext() {
      let start = currentChunk * hashChunkSize
      let end = Math.min(start + hashChunkSize, file.size)
      fileReader.readAsArrayBuffer(blobSlice.call(file, start, end))
    }

    loadNext()
  })

It seems that spark.append(event.target.result) cannot allocate enough memory for the chunk...
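
As a stopgap I could wrap the per-chunk work in a try/catch so an allocation failure at least rejects the promise instead of escaping the onload handler (a sketch only; it does not fix the underlying memory limit):

fileReader.onload = function (event) {
  try {
    sparkArray.push(SparkMD5.ArrayBuffer.hash(event.target.result))
    spark.append(event.target.result)
  } catch (err) {
    // On this 32-bit setup the chunk allocation inside append can fail;
    // reject here so the caller sees the error instead of an uncaught RangeError.
    reject(err)
    return
  }
  currentChunk++

  if (currentChunk < chunkCount) {
    loadNext()
  } else {
    resolve({
      md5: spark.end(),
      md5Array: sparkArray
    })
  }
}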