mirror of
https://github.com/mozilla/pdf.js.git
synced 2025-04-20 15:18:08 +02:00
[api-minor] Don't normalize the text used in the text layer.
Some Arabic chars like \ufe94 can be searched for in a PDF, hence the text must be normalized when creating the search query. So, to avoid duplicating the normalization code, everything has been moved into the find controller. The previous code to normalize text used NFKC with a hardcoded map; it has been replaced by a call to normalize("NFKC") (this helps to reduce the bundle size by 30kb). While playing with the \ufe94 char, I noticed that the bidi algorithm wasn't taking some RTL unicode ranges into account, the generated font wasn't embedding the mapping for this char, and the unicode ranges in the OS/2 table weren't up-to-date. When normalized, some chars can be replaced by several ones, which led to some extra chars appearing in the text layer. To avoid any regression, when copying some text from the text layer, the copied string is normalized (NFKC) before being put into the clipboard (it works like this in both Acrobat and Chrome).
This commit is contained in:
parent
3e08eee511
commit
117bbf7cd9
22 changed files with 447 additions and 1672 deletions
|
@ -693,6 +693,7 @@ class Driver {
|
|||
initPromise = page
|
||||
.getTextContent({
|
||||
includeMarkedContent: true,
|
||||
disableNormalization: true,
|
||||
})
|
||||
.then(function (textContent) {
|
||||
return Rasterize.textLayer(
|
||||
|
|
|
@ -28,7 +28,7 @@ describe("Copy and paste", () => {
|
|||
await closePages(pages);
|
||||
});
|
||||
|
||||
it("must check that we've all the contents", async () => {
|
||||
it("must check that we've all the contents on copy/paste", async () => {
|
||||
await Promise.all(
|
||||
pages.map(async ([browserName, page]) => {
|
||||
await page.keyboard.down("Control");
|
||||
|
@ -117,4 +117,47 @@ describe("Copy and paste", () => {
|
|||
);
|
||||
});
|
||||
});
|
||||
describe("all text", () => {
|
||||
let pages;
|
||||
|
||||
beforeAll(async () => {
|
||||
pages = await loadAndWait("copy_paste_ligatures.pdf", ".textLayer");
|
||||
await mockClipboard(pages);
|
||||
});
|
||||
|
||||
afterAll(async () => {
|
||||
await closePages(pages);
|
||||
});
|
||||
|
||||
it("must check that we've all the contents on copy/paste", async () => {
|
||||
await Promise.all(
|
||||
pages.map(async ([browserName, page]) => {
|
||||
await page.keyboard.down("Control");
|
||||
await page.keyboard.press("a");
|
||||
await page.keyboard.up("Control");
|
||||
|
||||
await page.waitForTimeout(100);
|
||||
|
||||
await page.keyboard.down("Control");
|
||||
await page.keyboard.press("c");
|
||||
await page.keyboard.up("Control");
|
||||
|
||||
await page.waitForTimeout(100);
|
||||
|
||||
await page.waitForFunction(
|
||||
`document.querySelector('#viewerContainer').style.cursor !== "wait"`
|
||||
);
|
||||
|
||||
const text = await page.evaluate(() =>
|
||||
navigator.clipboard.readText()
|
||||
);
|
||||
|
||||
expect(!!text).withContext(`In ${browserName}`).toEqual(true);
|
||||
expect(text)
|
||||
.withContext(`In ${browserName}`)
|
||||
.toEqual("abcdeffffiflffifflſtstghijklmno");
|
||||
})
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
1
test/pdfs/.gitignore
vendored
1
test/pdfs/.gitignore
vendored
|
@ -585,3 +585,4 @@
|
|||
!issue16221.pdf
|
||||
!issue16224.pdf
|
||||
!issue16278.pdf
|
||||
!copy_paste_ligatures.pdf
|
||||
|
|
BIN
test/pdfs/copy_paste_ligatures.pdf
Executable file
BIN
test/pdfs/copy_paste_ligatures.pdf
Executable file
Binary file not shown.
|
@ -2340,7 +2340,9 @@ page 1 / 3`);
|
|||
);
|
||||
const pdfDoc = await loadingTask.promise;
|
||||
const pdfPage = await pdfDoc.getPage(1);
|
||||
const { items, styles } = await pdfPage.getTextContent();
|
||||
const { items, styles } = await pdfPage.getTextContent({
|
||||
disableNormalization: true,
|
||||
});
|
||||
expect(items.length).toEqual(1);
|
||||
// Font name will be a random object id.
|
||||
const fontName = items[0].fontName;
|
||||
|
@ -2376,7 +2378,9 @@ page 1 / 3`);
|
|||
const loadingTask = getDocument(buildGetDocumentParams("issue13226.pdf"));
|
||||
const pdfDoc = await loadingTask.promise;
|
||||
const pdfPage = await pdfDoc.getPage(1);
|
||||
const { items } = await pdfPage.getTextContent();
|
||||
const { items } = await pdfPage.getTextContent({
|
||||
disableNormalization: true,
|
||||
});
|
||||
const text = mergeText(items);
|
||||
|
||||
expect(text).toEqual(
|
||||
|
@ -2394,7 +2398,9 @@ page 1 / 3`);
|
|||
const loadingTask = getDocument(buildGetDocumentParams("issue16119.pdf"));
|
||||
const pdfDoc = await loadingTask.promise;
|
||||
const pdfPage = await pdfDoc.getPage(1);
|
||||
const { items } = await pdfPage.getTextContent();
|
||||
const { items } = await pdfPage.getTextContent({
|
||||
disableNormalization: true,
|
||||
});
|
||||
const text = mergeText(items);
|
||||
|
||||
expect(
|
||||
|
@ -2410,7 +2416,9 @@ page 1 / 3`);
|
|||
const loadingTask = getDocument(buildGetDocumentParams("issue13201.pdf"));
|
||||
const pdfDoc = await loadingTask.promise;
|
||||
const pdfPage = await pdfDoc.getPage(1);
|
||||
const { items } = await pdfPage.getTextContent();
|
||||
const { items } = await pdfPage.getTextContent({
|
||||
disableNormalization: true,
|
||||
});
|
||||
const text = mergeText(items);
|
||||
|
||||
expect(
|
||||
|
@ -2436,7 +2444,9 @@ page 1 / 3`);
|
|||
const loadingTask = getDocument(buildGetDocumentParams("issue11913.pdf"));
|
||||
const pdfDoc = await loadingTask.promise;
|
||||
const pdfPage = await pdfDoc.getPage(1);
|
||||
const { items } = await pdfPage.getTextContent();
|
||||
const { items } = await pdfPage.getTextContent({
|
||||
disableNormalization: true,
|
||||
});
|
||||
const text = mergeText(items);
|
||||
|
||||
expect(
|
||||
|
@ -2456,7 +2466,9 @@ page 1 / 3`);
|
|||
const loadingTask = getDocument(buildGetDocumentParams("issue10900.pdf"));
|
||||
const pdfDoc = await loadingTask.promise;
|
||||
const pdfPage = await pdfDoc.getPage(1);
|
||||
const { items } = await pdfPage.getTextContent();
|
||||
const { items } = await pdfPage.getTextContent({
|
||||
disableNormalization: true,
|
||||
});
|
||||
const text = mergeText(items);
|
||||
|
||||
expect(
|
||||
|
@ -2475,11 +2487,27 @@ page 1 / 3`);
|
|||
const loadingTask = getDocument(buildGetDocumentParams("issue10640.pdf"));
|
||||
const pdfDoc = await loadingTask.promise;
|
||||
const pdfPage = await pdfDoc.getPage(1);
|
||||
const { items } = await pdfPage.getTextContent();
|
||||
const text = mergeText(items);
|
||||
let { items } = await pdfPage.getTextContent({
|
||||
disableNormalization: true,
|
||||
});
|
||||
let text = mergeText(items);
|
||||
let expected = `Open Sans is a humanist sans serif typeface designed by Steve Matteson.
|
||||
Open Sans was designed with an upright stress, open forms and a neu-
|
||||
tral, yet friendly appearance. It was optimized for print, web, and mobile
|
||||
interfaces, and has excellent legibility characteristics in its letterforms (see
|
||||
figure \x81 on the following page). This font is available from the Google Font
|
||||
Directory [\x81] as TrueType files licensed under the Apache License version \x82.\x80.
|
||||
This package provides support for this font in LATEX. It includes Type \x81
|
||||
versions of the fonts, converted for this package using FontForge from its
|
||||
sources, for full support with Dvips.`;
|
||||
|
||||
expect(
|
||||
text.includes(`Open Sans is a humanist sans serif typeface designed by Steve Matteson.
|
||||
expect(text.includes(expected)).toEqual(true);
|
||||
|
||||
({ items } = await pdfPage.getTextContent({
|
||||
disableNormalization: false,
|
||||
}));
|
||||
text = mergeText(items);
|
||||
expected = `Open Sans is a humanist sans serif typeface designed by Steve Matteson.
|
||||
Open Sans was designed with an upright stress, open forms and a neu-
|
||||
tral, yet friendly appearance. It was optimized for print, web, and mobile
|
||||
interfaces, and has excellent legibility characteristics in its letterforms (see
|
||||
|
@ -2487,8 +2515,8 @@ figure \x81 on the following page). This font is available from the Google Font
|
|||
Directory [\x81] as TrueType files licensed under the Apache License version \x82.\x80.
|
||||
This package provides support for this font in LATEX. It includes Type \x81
|
||||
versions of the fonts, converted for this package using FontForge from its
|
||||
sources, for full support with Dvips.`)
|
||||
).toEqual(true);
|
||||
sources, for full support with Dvips.`;
|
||||
expect(text.includes(expected)).toEqual(true);
|
||||
|
||||
await loadingTask.destroy();
|
||||
});
|
||||
|
@ -2501,7 +2529,9 @@ sources, for full support with Dvips.`)
|
|||
const loadingTask = getDocument(buildGetDocumentParams("bug931481.pdf"));
|
||||
const pdfDoc = await loadingTask.promise;
|
||||
const pdfPage = await pdfDoc.getPage(1);
|
||||
const { items } = await pdfPage.getTextContent();
|
||||
const { items } = await pdfPage.getTextContent({
|
||||
disableNormalization: true,
|
||||
});
|
||||
const text = mergeText(items);
|
||||
|
||||
expect(
|
||||
|
@ -2529,7 +2559,9 @@ sozialökonomische Gerechtigkeit.`)
|
|||
const loadingTask = getDocument(buildGetDocumentParams("issue9186.pdf"));
|
||||
const pdfDoc = await loadingTask.promise;
|
||||
const pdfPage = await pdfDoc.getPage(1);
|
||||
const { items } = await pdfPage.getTextContent();
|
||||
const { items } = await pdfPage.getTextContent({
|
||||
disableNormalization: true,
|
||||
});
|
||||
const text = mergeText(items);
|
||||
|
||||
expect(
|
||||
|
@ -2550,7 +2582,9 @@ Caron Broadcasting, Inc., an Ohio corporation (“Lessee”).`)
|
|||
);
|
||||
const pdfDoc = await loadingTask.promise;
|
||||
const pdfPage = await pdfDoc.getPage(1);
|
||||
const { items } = await pdfPage.getTextContent();
|
||||
const { items } = await pdfPage.getTextContent({
|
||||
disableNormalization: true,
|
||||
});
|
||||
const text = mergeText(items);
|
||||
|
||||
expect(text).toEqual(
|
||||
|
@ -2568,7 +2602,9 @@ Caron Broadcasting, Inc., an Ohio corporation (“Lessee”).`)
|
|||
const loadingTask = getDocument(buildGetDocumentParams("bug1755201.pdf"));
|
||||
const pdfDoc = await loadingTask.promise;
|
||||
const pdfPage = await pdfDoc.getPage(6);
|
||||
const { items } = await pdfPage.getTextContent();
|
||||
const { items } = await pdfPage.getTextContent({
|
||||
disableNormalization: true,
|
||||
});
|
||||
const text = mergeText(items);
|
||||
|
||||
expect(/win aisle/.test(text)).toEqual(false);
|
||||
|
@ -2586,10 +2622,12 @@ Caron Broadcasting, Inc., an Ohio corporation (“Lessee”).`)
|
|||
const pdfPage = await pdfDoc.getPage(568);
|
||||
let { items } = await pdfPage.getTextContent({
|
||||
includeMarkedContent: false,
|
||||
disableNormalization: true,
|
||||
});
|
||||
const textWithoutMC = mergeText(items);
|
||||
({ items } = await pdfPage.getTextContent({
|
||||
includeMarkedContent: true,
|
||||
disableNormalization: true,
|
||||
}));
|
||||
const textWithMC = mergeText(items);
|
||||
|
||||
|
@ -2607,7 +2645,9 @@ Caron Broadcasting, Inc., an Ohio corporation (“Lessee”).`)
|
|||
);
|
||||
const pdfDoc = await loadingTask.promise;
|
||||
const pdfPage = await pdfDoc.getPage(1);
|
||||
const { items } = await pdfPage.getTextContent();
|
||||
const { items } = await pdfPage.getTextContent({
|
||||
disableNormalization: true,
|
||||
});
|
||||
const text = mergeText(items);
|
||||
|
||||
expect(text).toEqual("𠮷");
|
||||
|
@ -2619,7 +2659,9 @@ Caron Broadcasting, Inc., an Ohio corporation (“Lessee”).`)
|
|||
const loadingTask = getDocument(buildGetDocumentParams("issue16221.pdf"));
|
||||
const pdfDoc = await loadingTask.promise;
|
||||
const pdfPage = await pdfDoc.getPage(1);
|
||||
const { items } = await pdfPage.getTextContent();
|
||||
const { items } = await pdfPage.getTextContent({
|
||||
disableNormalization: true,
|
||||
});
|
||||
|
||||
expect(items.map(i => i.str)).toEqual(["Hello ", "World"]);
|
||||
|
||||
|
|
|
@ -542,7 +542,7 @@ describe("pdf_find_controller", function () {
|
|||
pageIndex: 0,
|
||||
matchIndex: 0,
|
||||
},
|
||||
pageMatches: [[2743]],
|
||||
pageMatches: [[2734]],
|
||||
pageMatchesLength: [[14]],
|
||||
});
|
||||
});
|
||||
|
@ -561,7 +561,7 @@ describe("pdf_find_controller", function () {
|
|||
pageIndex: 1,
|
||||
matchIndex: 0,
|
||||
},
|
||||
pageMatches: [[], [1493]],
|
||||
pageMatches: [[], [1486]],
|
||||
pageMatchesLength: [[], [11]],
|
||||
});
|
||||
});
|
||||
|
@ -594,7 +594,7 @@ describe("pdf_find_controller", function () {
|
|||
[],
|
||||
[],
|
||||
[],
|
||||
[2087],
|
||||
[2081],
|
||||
],
|
||||
pageMatchesLength: [
|
||||
[24],
|
||||
|
@ -629,7 +629,7 @@ describe("pdf_find_controller", function () {
|
|||
pageIndex: 0,
|
||||
matchIndex: 0,
|
||||
},
|
||||
pageMatches: [[1501]],
|
||||
pageMatches: [[1497]],
|
||||
pageMatchesLength: [[25]],
|
||||
});
|
||||
});
|
||||
|
@ -670,7 +670,7 @@ describe("pdf_find_controller", function () {
|
|||
pageIndex: 0,
|
||||
matchIndex: 0,
|
||||
},
|
||||
pageMatches: [[1946]],
|
||||
pageMatches: [[1941]],
|
||||
pageMatchesLength: [[21]],
|
||||
});
|
||||
});
|
||||
|
@ -692,7 +692,7 @@ describe("pdf_find_controller", function () {
|
|||
pageIndex: 0,
|
||||
matchIndex: 0,
|
||||
},
|
||||
pageMatches: [[1946]],
|
||||
pageMatches: [[1941]],
|
||||
pageMatchesLength: [[23]],
|
||||
});
|
||||
});
|
||||
|
@ -712,7 +712,7 @@ describe("pdf_find_controller", function () {
|
|||
pageIndex: 0,
|
||||
matchIndex: 0,
|
||||
},
|
||||
pageMatches: [[1946]],
|
||||
pageMatches: [[1941]],
|
||||
pageMatchesLength: [[23]],
|
||||
});
|
||||
});
|
||||
|
@ -976,4 +976,61 @@ describe("pdf_find_controller", function () {
|
|||
pageMatchesLength: [[5, 5]],
|
||||
});
|
||||
});
|
||||
|
||||
it("performs a search in a text with some arabic chars in different unicode ranges but with same normalized form", async function () {
|
||||
const { eventBus, pdfFindController } = await initPdfFindController(
|
||||
"ArabicCIDTrueType.pdf"
|
||||
);
|
||||
|
||||
await testSearch({
|
||||
eventBus,
|
||||
pdfFindController,
|
||||
state: {
|
||||
query: "\u0629",
|
||||
},
|
||||
matchesPerPage: [4],
|
||||
selectedMatch: {
|
||||
pageIndex: 0,
|
||||
matchIndex: 0,
|
||||
},
|
||||
pageMatches: [[6, 25, 44, 63]],
|
||||
pageMatchesLength: [[1, 1, 1, 1]],
|
||||
});
|
||||
|
||||
await testSearch({
|
||||
eventBus,
|
||||
pdfFindController,
|
||||
state: {
|
||||
query: "\ufe94",
|
||||
},
|
||||
matchesPerPage: [4],
|
||||
selectedMatch: {
|
||||
pageIndex: 0,
|
||||
matchIndex: 0,
|
||||
},
|
||||
pageMatches: [[6, 25, 44, 63]],
|
||||
pageMatchesLength: [[1, 1, 1, 1]],
|
||||
});
|
||||
});
|
||||
|
||||
it("performs a search in a text with some f ligatures", async function () {
|
||||
const { eventBus, pdfFindController } = await initPdfFindController(
|
||||
"copy_paste_ligatures.pdf"
|
||||
);
|
||||
|
||||
await testSearch({
|
||||
eventBus,
|
||||
pdfFindController,
|
||||
state: {
|
||||
query: "f",
|
||||
},
|
||||
matchesPerPage: [9],
|
||||
selectedMatch: {
|
||||
pageIndex: 0,
|
||||
matchIndex: 0,
|
||||
},
|
||||
pageMatches: [[5, 6, 6, 7, 8, 9, 9, 10, 10]],
|
||||
pageMatchesLength: [[1, 1, 1, 1, 1, 1, 1, 1, 1]],
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
@ -15,11 +15,9 @@
|
|||
|
||||
import {
|
||||
getCharUnicodeCategory,
|
||||
getNormalizedUnicodes,
|
||||
getUnicodeForGlyph,
|
||||
getUnicodeRangeFor,
|
||||
mapSpecialUnicodeValues,
|
||||
reverseIfRtl,
|
||||
} from "../../src/core/unicode.js";
|
||||
import {
|
||||
getDingbatsGlyphsUnicode,
|
||||
|
@ -152,69 +150,12 @@ describe("unicode", function () {
|
|||
expect(getUnicodeRangeFor(0x0041)).toEqual(0);
|
||||
// fi (Alphabetic Presentation Forms)
|
||||
expect(getUnicodeRangeFor(0xfb01)).toEqual(62);
|
||||
// Combining diacritic (Cyrillic Extended-A)
|
||||
expect(getUnicodeRangeFor(0x2dff)).toEqual(9);
|
||||
});
|
||||
|
||||
it("should not get a Unicode range", function () {
|
||||
expect(getUnicodeRangeFor(0x05ff)).toEqual(-1);
|
||||
});
|
||||
});
|
||||
|
||||
describe("getNormalizedUnicodes", function () {
|
||||
let NormalizedUnicodes;
|
||||
|
||||
beforeAll(function () {
|
||||
NormalizedUnicodes = getNormalizedUnicodes();
|
||||
});
|
||||
|
||||
afterAll(function () {
|
||||
NormalizedUnicodes = null;
|
||||
});
|
||||
|
||||
it("should get normalized Unicode values for ligatures", function () {
|
||||
// fi => f + i
|
||||
expect(NormalizedUnicodes["\uFB01"]).toEqual("fi");
|
||||
// Arabic
|
||||
expect(NormalizedUnicodes["\u0675"]).toEqual("\u0627\u0674");
|
||||
});
|
||||
|
||||
it("should not normalize standard characters", function () {
|
||||
expect(NormalizedUnicodes.A).toEqual(undefined);
|
||||
});
|
||||
});
|
||||
|
||||
describe("reverseIfRtl", function () {
|
||||
let NormalizedUnicodes;
|
||||
|
||||
function getGlyphUnicode(char) {
|
||||
if (NormalizedUnicodes[char] !== undefined) {
|
||||
return NormalizedUnicodes[char];
|
||||
}
|
||||
return char;
|
||||
}
|
||||
|
||||
beforeAll(function () {
|
||||
NormalizedUnicodes = getNormalizedUnicodes();
|
||||
});
|
||||
|
||||
afterAll(function () {
|
||||
NormalizedUnicodes = null;
|
||||
});
|
||||
|
||||
it("should not reverse LTR characters", function () {
|
||||
const A = getGlyphUnicode("A");
|
||||
expect(reverseIfRtl(A)).toEqual("A");
|
||||
|
||||
const fi = getGlyphUnicode("\uFB01");
|
||||
expect(reverseIfRtl(fi)).toEqual("fi");
|
||||
});
|
||||
|
||||
it("should reverse RTL characters", function () {
|
||||
// Hebrew (no-op, since it's not a combined character)
|
||||
const heAlef = getGlyphUnicode("\u05D0");
|
||||
expect(reverseIfRtl(heAlef)).toEqual("\u05D0");
|
||||
// Arabic
|
||||
const arAlef = getGlyphUnicode("\u0675");
|
||||
expect(reverseIfRtl(arAlef)).toEqual("\u0674\u0627");
|
||||
expect(getUnicodeRangeFor(0xaa60)).toEqual(-1);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue