mirror of https://github.com/mozilla/pdf.js.git (synced 2025-04-22 16:18:08 +02:00)
[api-minor] Don't normalize the text used in the text layer.
Some Arabic characters, like \ufe94, can be searched for in a PDF, so the search query must be normalized when it is created. To avoid duplicating the normalization code, everything has been moved into the find controller.

The previous text-normalization code used NFKC but with a hardcoded map; it has been replaced by a call to normalize("NFKC"), which reduces the bundle size by 30 kB.

While experimenting with the \ufe94 character, I noticed that the bidi algorithm didn't take some RTL Unicode ranges into account, that the generated font didn't embed the mapping for this character, and that the Unicode ranges in the OS/2 table weren't up to date.

When normalized, some characters can be replaced by several ones, which led to extra characters appearing in the text layer. To avoid any regression, text copied from the text layer is normalized (NFKC) before being put on the clipboard (this matches the behavior of both Acrobat and Chrome).
parent 3e08eee511
commit 117bbf7cd9
22 changed files with 447 additions and 1672 deletions
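The clipboard behavior described in the commit message amounts to normalizing the selection at copy time. A minimal, hypothetical sketch of the idea (textLayerDiv is a placeholder element name; the actual pdf.js implementation differs):

textLayerDiv.addEventListener("copy", event => {
  const text = document.getSelection().toString();
  // NFKC folds compatibility characters (presentation forms, ligatures)
  // before they reach the clipboard, matching Acrobat/Chrome behavior.
  event.clipboardData.setData("text/plain", text.normalize("NFKC"));
  event.preventDefault(); // suppress the browser's default raw copy
});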
src/core/bidi.js

@@ -147,7 +147,11 @@ function bidi(str, startLevel = -1, vertical = false) {
       if (!charType) {
         warn("Bidi: invalid Unicode character " + charCode.toString(16));
       }
-    } else if (0x0700 <= charCode && charCode <= 0x08ac) {
+    } else if (
+      (0x0700 <= charCode && charCode <= 0x08ac) ||
+      (0xfb50 <= charCode && charCode <= 0xfdff) ||
+      (0xfe70 <= charCode && charCode <= 0xfeff)
+    ) {
       charType = "AL";
     }
     if (charType === "R" || charType === "AL" || charType === "AN") {
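The two new ranges are the Arabic Presentation Forms-A (U+FB50–U+FDFF) and Forms-B (U+FE70–U+FEFF) blocks, which is what makes a character like \ufe94 (ARABIC LETTER TEH MARBUTA FINAL FORM) register as an Arabic Letter. An illustrative check, not pdf.js code:

// With the extra ranges, presentation-form characters are classified "AL":
const isArabicLetterRange = charCode =>
  (0x0700 <= charCode && charCode <= 0x08ac) ||
  (0xfb50 <= charCode && charCode <= 0xfdff) || // Arabic Presentation Forms-A
  (0xfe70 <= charCode && charCode <= 0xfeff); // Arabic Presentation Forms-B

console.log(isArabicLetterRange(0xfe94)); // true — previously fell through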
src/core/document.js

@@ -511,7 +511,13 @@ class Page {
     });
   }

-  extractTextContent({ handler, task, includeMarkedContent, sink }) {
+  extractTextContent({
+    handler,
+    task,
+    includeMarkedContent,
+    disableNormalization,
+    sink,
+  }) {
     const contentStreamPromise = this.getContentStream();
     const resourcesPromise = this.loadResources([
       "ExtGState",
@@ -539,6 +545,7 @@ class Page {
       task,
       resources: this.resources,
       includeMarkedContent,
+      disableNormalization,
       sink,
       viewBox: this.view,
     });
src/core/evaluator.js

@@ -24,6 +24,7 @@ import {
   IDENTITY_MATRIX,
   info,
   isArrayEqual,
+  normalizeUnicode,
   OPS,
   shadow,
   stringToPDFString,
@@ -2271,6 +2272,7 @@ class PartialEvaluator {
     seenStyles = new Set(),
     viewBox,
     markedContentData = null,
+    disableNormalization = false,
   }) {
     // Ensure that `resources`/`stateManager` is correctly initialized,
     // even if the provided parameter is e.g. `null`.
@@ -2524,7 +2526,10 @@ class PartialEvaluator {
     }

     function runBidiTransform(textChunk) {
-      const text = textChunk.str.join("");
+      let text = textChunk.str.join("");
+      if (!disableNormalization) {
+        text = normalizeUnicode(text);
+      }
       const bidiResult = bidi(text, -1, textChunk.vertical);
       return {
         str: bidiResult.str,
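This is the spot where the disableNormalization flag takes effect: when it is unset, the joined chunk text is NFKC-normalized before the bidi pass. It also shows why normalization can add "extra chars" to the text layer — NFKC may expand a single code point into several. For instance:

// NFKC can expand one code point into several:
"\ufb01".normalize("NFKC"); // "fi" — the fi ligature becomes two chars
"\ufdfa".normalize("NFKC").length; // 18 — an Arabic ligature expands to a phrase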
@@ -2859,7 +2864,7 @@ class PartialEvaluator {
         textChunk.prevTransform = getCurrentTextTransform();
       }

-      const glyphUnicode = glyph.normalizedUnicode;
+      const glyphUnicode = glyph.unicode;
       if (saveLastChar(glyphUnicode)) {
         // The two last chars are a non-whitespace followed by a whitespace
         // and then this non-whitespace, so we insert a whitespace here.
@@ -3242,6 +3247,7 @@ class PartialEvaluator {
       seenStyles,
       viewBox,
       markedContentData,
+      disableNormalization,
     })
       .then(function () {
         if (!sinkWrapper.enqueueInvoked) {
src/core/fonts.js

@@ -33,11 +33,9 @@ import {
 } from "./fonts_utils.js";
 import {
   getCharUnicodeCategory,
-  getNormalizedUnicodes,
   getUnicodeForGlyph,
   getUnicodeRangeFor,
   mapSpecialUnicodeValues,
-  reverseIfRtl,
 } from "./unicode.js";
 import { getDingbatsGlyphsUnicode, getGlyphsUnicode } from "./glyphlist.js";
 import {
@@ -277,24 +275,6 @@ class Glyph {
       /* nonSerializable = */ true
     );
   }
-
-  /**
-   * This property, which is only used by `PartialEvaluator.getTextContent`,
-   * is purposely made non-serializable.
-   * @type {string}
-   */
-  get normalizedUnicode() {
-    return shadow(
-      this,
-      "normalizedUnicode",
-      reverseIfRtl(Glyph._NormalizedUnicodes[this.unicode] || this.unicode),
-      /* nonSerializable = */ true
-    );
-  }
-
-  static get _NormalizedUnicodes() {
-    return shadow(this, "_NormalizedUnicodes", getNormalizedUnicodes());
-  }
 }

 function int16(b0, b1) {
@@ -507,6 +487,9 @@ function adjustMapping(charCodeToGlyphId, hasGlyph, newGlyphZeroId, toUnicode) {
   const privateUseOffetStart = PRIVATE_USE_AREAS[privateUseAreaIndex][0];
   let nextAvailableFontCharCode = privateUseOffetStart;
   let privateUseOffetEnd = PRIVATE_USE_AREAS[privateUseAreaIndex][1];
+  const isInPrivateArea = code =>
+    (PRIVATE_USE_AREAS[0][0] <= code && code <= PRIVATE_USE_AREAS[0][1]) ||
+    (PRIVATE_USE_AREAS[1][0] <= code && code <= PRIVATE_USE_AREAS[1][1]);
   for (let originalCharCode in charCodeToGlyphId) {
     originalCharCode |= 0;
     let glyphId = charCodeToGlyphId[originalCharCode];
@@ -539,11 +522,7 @@ function adjustMapping(charCodeToGlyphId, hasGlyph, newGlyphZeroId, toUnicode) {
       if (typeof unicode === "string") {
         unicode = unicode.codePointAt(0);
       }
-      if (
-        unicode &&
-        unicode < privateUseOffetStart &&
-        !usedGlyphIds.has(glyphId)
-      ) {
+      if (unicode && !isInPrivateArea(unicode) && !usedGlyphIds.has(glyphId)) {
         toUnicodeExtraMap.set(unicode, glyphId);
         usedGlyphIds.add(glyphId);
       }
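The replaced condition only compared against the start of the currently selected private-use block; isInPrivateArea now tests both Unicode Private Use Areas explicitly. For reference, a sketch of the table it indexes — these values should match pdf.js's PRIVATE_USE_AREAS, but treat them as an assumption:

const PRIVATE_USE_AREAS = [
  [0xe000, 0xf8ff], // Private Use Area in the Basic Multilingual Plane
  [0x100000, 0x10fffd], // Supplementary Private Use Area-B (plane 16)
];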
@@ -785,6 +764,7 @@ function createOS2Table(properties, charstrings, override) {

   let firstCharIndex = null;
   let lastCharIndex = 0;
+  let position = -1;

   if (charstrings) {
     for (let code in charstrings) {
@@ -796,7 +776,7 @@ function createOS2Table(properties, charstrings, override) {
         lastCharIndex = code;
       }

-      const position = getUnicodeRangeFor(code);
+      position = getUnicodeRangeFor(code, position);
       if (position < 32) {
         ulUnicodeRange1 |= 1 << position;
       } else if (position < 64) {
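The position variable hoisted above the loop is fed back into getUnicodeRangeFor as a hint. A plausible sketch of that hint pattern — the name UnicodeRanges and the { begin, end } shape are assumptions, not the literal pdf.js data structure:

// Try the previously matched OS/2 UnicodeRange entry first, since
// consecutive charcodes usually belong to the same script; fall back
// to a full scan otherwise.
function getUnicodeRangeFor(value, lastPosition = -1) {
  if (lastPosition !== -1) {
    const { begin, end } = UnicodeRanges[lastPosition];
    if (begin <= value && value < end) {
      return lastPosition; // hit: same range as the previous char
    }
  }
  for (let i = 0, ii = UnicodeRanges.length; i < ii; i++) {
    const { begin, end } = UnicodeRanges[i];
    if (begin <= value && value < end) {
      return i;
    }
  }
  return -1;
}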
src/core/unicode.js — 1685 changed lines (file diff suppressed because it is too large; this is where the hardcoded normalization map mentioned in the commit message was removed)
src/core/worker.js

@@ -745,7 +745,7 @@ class WorkerMessageHandler {
     });

     handler.on("GetTextContent", function (data, sink) {
-      const { pageIndex, includeMarkedContent } = data;
+      const { pageIndex, includeMarkedContent, disableNormalization } = data;

       pdfManager.getPage(pageIndex).then(function (page) {
         const task = new WorkerTask("GetTextContent: page " + pageIndex);
@@ -760,6 +760,7 @@ class WorkerMessageHandler {
           task,
           sink,
           includeMarkedContent,
+          disableNormalization,
         })
           .then(
             function () {
src/display/api.js

@@ -1122,6 +1122,8 @@ class PDFDocumentProxy {
 * @typedef {Object} getTextContentParameters
 * @property {boolean} [includeMarkedContent] - When true include marked
 *   content items in the items array of TextContent. The default is `false`.
+ * @property {boolean} [disableNormalization] - When true the text is *not*
+ *   normalized in the worker-thread. The default is `false`.
 */

/**
@@ -1598,7 +1600,10 @@ class PDFPageProxy {
   * @param {getTextContentParameters} params - getTextContent parameters.
   * @returns {ReadableStream} Stream for reading text content chunks.
   */
-  streamTextContent({ includeMarkedContent = false } = {}) {
+  streamTextContent({
+    includeMarkedContent = false,
+    disableNormalization = false,
+  } = {}) {
     const TEXT_CONTENT_CHUNK_SIZE = 100;

     return this._transport.messageHandler.sendWithStream(
@@ -1606,6 +1611,7 @@ class PDFPageProxy {
       {
         pageIndex: this._pageIndex,
         includeMarkedContent: includeMarkedContent === true,
+        disableNormalization: disableNormalization === true,
       },
       {
         highWaterMark: TEXT_CONTENT_CHUNK_SIZE,
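On the API side the new option can be used like this — a hypothetical snippet ("doc.pdf" and the pdfjsLib global are placeholders); getTextContent shares getTextContentParameters with streamTextContent:

const pdf = await pdfjsLib.getDocument("doc.pdf").promise;
const page = await pdf.getPage(1);
// Raw text items, exactly as produced by the font mapping — no NFKC:
const { items } = await page.getTextContent({ disableNormalization: true });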
src/pdf.js

@@ -35,6 +35,7 @@ import {
   FeatureTest,
   InvalidPDFException,
   MissingPDFException,
+  normalizeUnicode,
   OPS,
   PasswordResponses,
   PermissionFlag,
@@ -100,6 +101,7 @@ export {
   isPdfFile,
   loadScript,
   MissingPDFException,
+  normalizeUnicode,
   OPS,
   PasswordResponses,
   PDFDataRangeTransport,
src/shared/util.js

@@ -1026,6 +1026,25 @@ function createPromiseCapability() {
   return capability;
 }

+let NormalizeRegex = null;
+let NormalizationMap = null;
+function normalizeUnicode(str) {
+  if (!NormalizeRegex) {
+    // In order to generate the following regex:
+    //  - create a PDF containing all the chars in the range 0000-FFFF whose
+    //    NFKC form differs from the char itself.
+    //  - copy and paste all those chars and keep the ones where NFKC is
+    //    required.
+    // It appears that most of the chars here are ligatures.
+    NormalizeRegex =
+      /([\u00a0\u00b5\u037e\u0eb3\u2000-\u200a\u202f\u2126\ufb00-\ufb04\ufb06\ufb20-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufba1\ufba4-\ufba9\ufbae-\ufbb1\ufbd3-\ufbdc\ufbde-\ufbe7\ufbea-\ufbf8\ufbfc-\ufbfd\ufc00-\ufc5d\ufc64-\ufcf1\ufcf5-\ufd3d\ufd88\ufdf4\ufdfa-\ufdfb\ufe71\ufe77\ufe79\ufe7b\ufe7d]+)|(\ufb05+)/gu;
+    NormalizationMap = new Map([["ſt", "ſt"]]);
+  }
+  return str.replaceAll(NormalizeRegex, (_, p1, p2) => {
+    return p1 ? p1.normalize("NFKC") : NormalizationMap.get(p2);
+  });
+}
+
 export {
   AbortException,
   AnnotationActionEventType,
@@ -1064,6 +1083,7 @@ export {
   LINE_FACTOR,
   MAX_IMAGE_SIZE_TO_CACHE,
   MissingPDFException,
+  normalizeUnicode,
   objectFromMap,
   objectSize,
   OPS,
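A note on the second capture group: plain NFKC would fold U+FB05 (the ﬅ ligature) into "st", losing the long s, which is why the map restores "ſt" (U+017F followed by "t") instead. A quick usage sketch, assuming the normalizeUnicode export shown in the src/pdf.js hunk above:

import { normalizeUnicode } from "pdfjs-dist";

normalizeUnicode("e\ufb03cient"); // "efficient" — U+FB03 (ffi) expands via NFKC
normalizeUnicode("\ufb05"); // "ſt" (U+017F + "t"), not "st"
normalizeUnicode("abc"); // "abc" — no match, returned unchanged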