Mirror of https://github.com/mozilla/pdf.js.git, synced 2025-04-22 16:18:08 +02:00
Merge pull request #11707 from Snuffleupagus/issue-11694
Always prefer the PDF.js JPEG decoder for very large images, in order to reduce peak memory usage (issue 11694)
Commit 292b77fe7b
1 changed file with 11 additions and 0 deletions
@@ -134,6 +134,17 @@ const JpegStream = (function JpegStreamClosure() {
           stream.pos += 2; // Skip marker length.
           stream.pos += 1; // Skip precision.
           const scanLines = stream.getUint16();
           const samplesPerLine = stream.getUint16();
+
+          // Letting the browser handle the JPEG decoding, on the main-thread,
+          // will cause a *large* increase in peak memory usage since there's
+          // a handful of short-lived copies of the image data. For very big
+          // JPEG images, always let the PDF.js image decoder handle them to
+          // reduce overall memory usage during decoding (see issue 11694).
+          if (scanLines * samplesPerLine > 1e6) {
+            validDimensions = false;
+            break;
+          }
+
           // The "normal" case, where the image data and dictionary agrees.
           if (scanLines === dictHeight) {
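The check added above runs while PDF.js scans the JPEG stream's marker segments for a start-of-frame (SOFn) marker, whose payload carries the sample precision, the scan-line count (height) and the samples-per-line count (width). Below is a minimal standalone sketch of that marker walk and the same greater-than-1e6-pixel heuristic; it is an illustration over a raw byte buffer, not the actual PDF.js stream API, and the helpers `isVeryLargeJpeg` and `readUint16` are hypothetical names introduced for the example.

// Illustrative sketch (not the actual PDF.js implementation): walk the
// JPEG marker segments in a raw byte buffer, find the first start-of-frame
// (SOFn) marker, and apply the same "very large image" heuristic as the
// patch above. `isVeryLargeJpeg` and `readUint16` are hypothetical helpers.
function isVeryLargeJpeg(bytes) {
  const readUint16 = pos => (bytes[pos] << 8) | bytes[pos + 1];

  let pos = 2; // Skip the SOI marker (0xFFD8) at the start of the stream.
  while (pos + 9 <= bytes.length) {
    if (bytes[pos] !== 0xff) {
      return false; // Lost marker synchronization; bail out of the sketch.
    }
    const marker = bytes[pos + 1];
    // SOF0-SOF15 markers, excluding DHT (0xC4), JPG (0xC8) and DAC (0xCC),
    // all share the same header layout: length, precision, height, width.
    if (marker >= 0xc0 && marker <= 0xcf &&
        marker !== 0xc4 && marker !== 0xc8 && marker !== 0xcc) {
      const scanLines = readUint16(pos + 5);      // Height: after the two
                                                  // length bytes + precision.
      const samplesPerLine = readUint16(pos + 7); // Width.
      return scanLines * samplesPerLine > 1e6;    // Same threshold as the patch.
    }
    // Every other header segment starts with a two-byte length that
    // includes the length field itself, so skip marker + payload.
    pos += 2 + readUint16(pos + 2);
  }
  return false; // No SOF marker found before the buffer ended.
}

The 1e6 threshold corresponds to roughly a one-megapixel image; at that size a single transient copy of decoded image data is already on the order of a few megabytes (e.g. about 3 MB for 8-bit RGB), so with the "handful of short-lived copies" the patch's comment describes, main-thread browser decoding would noticeably spike peak memory, and such images are routed to the PDF.js decoder instead.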