Mirror of https://github.com/mozilla/pdf.js.git (synced 2025-04-26 10:08:06 +02:00)
[Editor] Add support for printing/saving newly added Stamp annotations
In order to minimize the size of a saved PDF, we generate only one image and use a reference to it in each annotation that uses it. When printing, it's slightly different since we have to render each page independently, but we still use the same image within a page.
parent ccb72073b0
commit 599b9498f2
9 changed files with 519 additions and 19 deletions
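The commit message above describes one shared image per bitmap. Below is a minimal caller-side sketch of how the new AnnotationFactory entry points fit together; the surrounding variables (newAnnotations, pageAnnotations, evaluator, task, xref) and the driver itself are hypothetical, only the three static methods come from this diff:

// Hypothetical driver: build the bitmapId -> image-promise map once, up front.
const imagePromises = AnnotationFactory.generateImages(
  newAnnotations, // assumed: editor data, each entry carrying { bitmapId, bitmap }
  xref,
  evaluator.options.isOffscreenCanvasSupported
);

// Saving: every page passes the same map, so a bitmap shared by several stamp
// annotations is encoded and written to the PDF only once.
await AnnotationFactory.saveNewAnnotations(
  evaluator,
  task,
  pageAnnotations, // assumed: the new annotations belonging to one page
  imagePromises
);

// Printing: each page is rendered independently, but within a page annotations
// with the same bitmapId resolve to the same image object from the map.
await AnnotationFactory.printNewAnnotations(
  evaluator,
  task,
  pageAnnotations,
  imagePromises
);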
@@ -23,6 +23,7 @@ import {
   AnnotationType,
   assert,
   BASELINE_FACTOR,
+  FeatureTest,
   getModificationDate,
   IDENTITY_MATRIX,
   LINE_DESCENT_FACTOR,
@@ -52,15 +53,16 @@ import {
   parseDefaultAppearance,
 } from "./default_appearance.js";
 import { Dict, isName, Name, Ref, RefSet } from "./primitives.js";
+import { Stream, StringStream } from "./stream.js";
 import { writeDict, writeObject } from "./writer.js";
 import { BaseStream } from "./base_stream.js";
 import { bidi } from "./bidi.js";
 import { Catalog } from "./catalog.js";
 import { ColorSpace } from "./colorspace.js";
 import { FileSpec } from "./file_spec.js";
+import { JpegStream } from "./jpeg_stream.js";
 import { ObjectLoader } from "./object_loader.js";
 import { OperatorList } from "./operator_list.js";
-import { StringStream } from "./stream.js";
 import { XFAFactory } from "./xfa/factory.js";
 
 class AnnotationFactory {
@@ -257,11 +259,31 @@ class AnnotationFactory {
     }
   }
 
-  static async saveNewAnnotations(evaluator, task, annotations) {
+  static generateImages(annotations, xref, isOffscreenCanvasSupported) {
+    if (!isOffscreenCanvasSupported) {
+      warn(
+        "generateImages: OffscreenCanvas is not supported, cannot save or print some annotations with images."
+      );
+      return null;
+    }
+    let imagePromises;
+    for (const { bitmapId, bitmap } of annotations) {
+      if (!bitmap) {
+        continue;
+      }
+      imagePromises ||= new Map();
+      imagePromises.set(bitmapId, StampAnnotation.createImage(bitmap, xref));
+    }
+
+    return imagePromises;
+  }
+
+  static async saveNewAnnotations(evaluator, task, annotations, imagePromises) {
     const xref = evaluator.xref;
     let baseFontRef;
     const dependencies = [];
     const promises = [];
+    const { isOffscreenCanvasSupported } = evaluator.options;
 
     for (const annotation of annotations) {
       if (annotation.deleted) {
@@ -293,6 +315,36 @@ class AnnotationFactory {
           promises.push(
             InkAnnotation.createNewAnnotation(xref, annotation, dependencies)
           );
           break;
+        case AnnotationEditorType.STAMP:
+          if (!isOffscreenCanvasSupported) {
+            break;
+          }
+          const image = await imagePromises.get(annotation.bitmapId);
+          if (image.imageStream) {
+            const { imageStream, smaskStream } = image;
+            const buffer = [];
+            if (smaskStream) {
+              const smaskRef = xref.getNewTemporaryRef();
+              await writeObject(smaskRef, smaskStream, buffer, null);
+              dependencies.push({ ref: smaskRef, data: buffer.join("") });
+              imageStream.dict.set("SMask", smaskRef);
+              buffer.length = 0;
+            }
+            const imageRef = (image.imageRef = xref.getNewTemporaryRef());
+            await writeObject(imageRef, imageStream, buffer, null);
+            dependencies.push({ ref: imageRef, data: buffer.join("") });
+            image.imageStream = image.smaskStream = null;
+          }
+          promises.push(
+            StampAnnotation.createNewAnnotation(
+              xref,
+              annotation,
+              dependencies,
+              image
+            )
+          );
+          break;
       }
     }
@@ -302,7 +354,12 @@ class AnnotationFactory {
     };
   }
 
-  static async printNewAnnotations(evaluator, task, annotations) {
+  static async printNewAnnotations(
+    evaluator,
+    task,
+    annotations,
+    imagePromises
+  ) {
     if (!annotations) {
       return null;
     }
@@ -331,6 +388,23 @@ class AnnotationFactory {
             })
           );
           break;
+        case AnnotationEditorType.STAMP:
+          if (!isOffscreenCanvasSupported) {
+            break;
+          }
+          const image = await imagePromises.get(annotation.bitmapId);
+          if (image.imageStream) {
+            const { imageStream, smaskStream } = image;
+            if (smaskStream) {
+              imageStream.dict.set("SMask", smaskStream);
+            }
+            image.imageRef = new JpegStream(imageStream, imageStream.length);
+            image.imageStream = image.smaskStream = null;
+          }
+          promises.push(
+            StampAnnotation.createNewPrintAnnotation(xref, annotation, image)
+          );
+          break;
       }
     }
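The two STAMP branches above treat the shared image differently. A condensed comparison, reusing only identifiers that appear in this diff (these lines are illustration, not additional changes):

// Save path: the image and its soft mask become indirect objects in the
// incremental update, and the /SMask entry holds an object reference.
const smaskRef = xref.getNewTemporaryRef();
await writeObject(smaskRef, smaskStream, buffer, null);
imageStream.dict.set("SMask", smaskRef);

// Print path: nothing is appended to the file, so the alpha Stream is attached
// directly and the JPEG bytes are wrapped in a JpegStream so they can be decoded.
imageStream.dict.set("SMask", smaskStream);
image.imageRef = new JpegStream(imageStream, imageStream.length);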
@@ -4361,6 +4435,143 @@ class StampAnnotation extends MarkupAnnotation {
     this.data.annotationType = AnnotationType.STAMP;
     this.data.hasOwnCanvas = this.data.noRotate;
   }
+
+  static async createImage(bitmap, xref) {
+    // TODO: when printing, we could have a specific internal colorspace
+    // (e.g. something like DeviceRGBA) in order to avoid any conversion (i.e. no
+    // jpeg, no rgba to rgb conversion, etc...)
+
+    const { width, height } = bitmap;
+    const canvas = new OffscreenCanvas(width, height);
+    const ctx = canvas.getContext("2d", { alpha: true });
+
+    // Draw the image and get the data in order to extract the transparency.
+    ctx.drawImage(bitmap, 0, 0);
+    const data = ctx.getImageData(0, 0, width, height).data;
+    const buf32 = new Uint32Array(data.buffer);
+    const hasAlpha = buf32.some(
+      FeatureTest.isLittleEndian
+        ? x => x >>> 24 !== 0xff
+        : x => (x & 0xff) !== 0xff
+    );
+
+    if (hasAlpha) {
+      // Redraw the image on a white background in order to remove the thin gray
+      // line which can appear when exporting to jpeg.
+      ctx.fillStyle = "white";
+      ctx.fillRect(0, 0, width, height);
+      ctx.drawImage(bitmap, 0, 0);
+    }
+
+    const jpegBufferPromise = canvas
+      .convertToBlob({ type: "image/jpeg", quality: 1 })
+      .then(blob => {
+        return blob.arrayBuffer();
+      });
+
+    const xobjectName = Name.get("XObject");
+    const imageName = Name.get("Image");
+    const image = new Dict(xref);
+    image.set("Type", xobjectName);
+    image.set("Subtype", imageName);
+    image.set("BitsPerComponent", 8);
+    image.set("ColorSpace", Name.get("DeviceRGB"));
+    image.set("Filter", Name.get("DCTDecode"));
+    image.set("BBox", [0, 0, width, height]);
+    image.set("Width", width);
+    image.set("Height", height);
+
+    let smaskStream = null;
+    if (hasAlpha) {
+      const alphaBuffer = new Uint8Array(buf32.length);
+      if (FeatureTest.isLittleEndian) {
+        for (let i = 0, ii = buf32.length; i < ii; i++) {
+          alphaBuffer[i] = buf32[i] >>> 24;
+        }
+      } else {
+        for (let i = 0, ii = buf32.length; i < ii; i++) {
+          alphaBuffer[i] = buf32[i] & 0xff;
+        }
+      }
+
+      const smask = new Dict(xref);
+      smask.set("Type", xobjectName);
+      smask.set("Subtype", imageName);
+      smask.set("BitsPerComponent", 8);
+      smask.set("ColorSpace", Name.get("DeviceGray"));
+      smask.set("Width", width);
+      smask.set("Height", height);
+
+      smaskStream = new Stream(alphaBuffer, 0, 0, smask);
+    }
+    const imageStream = new Stream(await jpegBufferPromise, 0, 0, image);
+
+    return {
+      imageStream,
+      smaskStream,
+      width,
+      height,
+    };
+  }
+
+  static createNewDict(annotation, xref, { apRef, ap }) {
+    const { rect, rotation, user } = annotation;
+    const stamp = new Dict(xref);
+    stamp.set("Type", Name.get("Annot"));
+    stamp.set("Subtype", Name.get("Stamp"));
+    stamp.set("CreationDate", `D:${getModificationDate()}`);
+    stamp.set("Rect", rect);
+    stamp.set("F", 4);
+    stamp.set("Border", [0, 0, 0]);
+    stamp.set("Rotate", rotation);
+
+    if (user) {
+      stamp.set(
+        "T",
+        isAscii(user) ? user : stringToUTF16String(user, /* bigEndian = */ true)
+      );
+    }
+
+    if (apRef || ap) {
+      const n = new Dict(xref);
+      stamp.set("AP", n);
+
+      if (apRef) {
+        n.set("N", apRef);
+      } else {
+        n.set("N", ap);
+      }
+    }
+
+    return stamp;
+  }
+
+  static async createNewAppearanceStream(annotation, xref, params) {
+    const { rotation } = annotation;
+    const { imageRef, width, height } = params;
+    const resources = new Dict(xref);
+    const xobject = new Dict(xref);
+    resources.set("XObject", xobject);
+    xobject.set("Im0", imageRef);
+    const appearance = `q ${width} 0 0 ${height} 0 0 cm /Im0 Do Q`;
+
+    const appearanceStreamDict = new Dict(xref);
+    appearanceStreamDict.set("FormType", 1);
+    appearanceStreamDict.set("Subtype", Name.get("Form"));
+    appearanceStreamDict.set("Type", Name.get("XObject"));
+    appearanceStreamDict.set("BBox", [0, 0, width, height]);
+    appearanceStreamDict.set("Resources", resources);
+
+    if (rotation) {
+      const matrix = getRotationMatrix(rotation, width, height);
+      appearanceStreamDict.set("Matrix", matrix);
+    }
+
+    const ap = new StringStream(appearance);
+    ap.dict = appearanceStreamDict;
+
+    return ap;
+  }
 }
 
 class FileAttachmentAnnotation extends MarkupAnnotation {
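To see how the three new StampAnnotation statics fit together, here is a minimal sketch. The driver and the bitmap/annotation variables are hypothetical; in the real paths these statics are presumably reached through StampAnnotation.createNewAnnotation / createNewPrintAnnotation, and imageRef is assigned by saveNewAnnotations above rather than by hand:

// Encode the bitmap once: a DCT-encoded /DeviceRGB image stream plus, when the
// bitmap has transparency, a /DeviceGray soft-mask stream.
const image = await StampAnnotation.createImage(bitmap, xref);

// Give the image an object number so the appearance stream can refer to it
// (in the save path this is done inside saveNewAnnotations).
image.imageRef = xref.getNewTemporaryRef();

// Build the normal appearance: a Form XObject whose content stream,
// "q <width> 0 0 <height> 0 0 cm /Im0 Do Q", scales the unit-square image
// /Im0 (bound to image.imageRef in its /Resources) up to the bitmap's size.
const ap = await StampAnnotation.createNewAppearanceStream(annotation, xref, image);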