feat(utils): JSDoc for all APIs (#6980)
* feat(utils): JSDoc for all APIs
* fix tests

parent b8d2a4e84d, commit 2eeb0e46a2
31 changed files with 637 additions and 255 deletions
@@ -5,10 +5,7 @@
  * LICENSE file in the root directory of this source tree.
  */

-import type {
-  BrokenMarkdownLink,
-  ContentPaths,
-} from '@docusaurus/utils/lib/markdownLinks';
+import type {BrokenMarkdownLink, ContentPaths} from '@docusaurus/utils';
 import type {BlogPostMetadata} from '@docusaurus/plugin-content-blog';
 import type {Metadata as BlogPaginatedMetadata} from '@theme/BlogListPage';
@@ -13,7 +13,7 @@ import type {
   DocsMarkdownOption,
   SourceToPermalink,
   VersionMetadata,
-  BrokenMarkdownLink,
+  DocBrokenMarkdownLink,
 } from '../../types';
 import {VERSIONED_DOCS_DIR, CURRENT_VERSION_NAME} from '../../constants';

@@ -156,22 +156,22 @@ describe('linkify', () => {
       filePath: doc5,
       link: 'docNotExist1.md',
       contentPaths: versionCurrent,
-    } as BrokenMarkdownLink);
+    } as DocBrokenMarkdownLink);
     expect(onBrokenMarkdownLink).toHaveBeenNthCalledWith(2, {
       filePath: doc5,
       link: './docNotExist2.mdx',
       contentPaths: versionCurrent,
-    } as BrokenMarkdownLink);
+    } as DocBrokenMarkdownLink);
     expect(onBrokenMarkdownLink).toHaveBeenNthCalledWith(3, {
       filePath: doc5,
       link: '../docNotExist3.mdx',
       contentPaths: versionCurrent,
-    } as BrokenMarkdownLink);
+    } as DocBrokenMarkdownLink);
     expect(onBrokenMarkdownLink).toHaveBeenNthCalledWith(4, {
       filePath: doc5,
       link: './subdir/docNotExist4.md',
       contentPaths: versionCurrent,
-    } as BrokenMarkdownLink);
+    } as DocBrokenMarkdownLink);
   });

   it('transforms absolute links in versioned docs', async () => {
@@ -20,7 +20,7 @@ import type {
 } from './types';

 import _ from 'lodash';
-import {getElementsAround, toMessageRelativeFilePath} from '@docusaurus/utils';
+import {toMessageRelativeFilePath} from '@docusaurus/utils';
 import type {DocMetadataBase, DocNavLink} from '../types';

 export function isCategoriesShorthand(

@@ -225,11 +225,11 @@ export function createSidebarsUtils(sidebars: Sidebars): SidebarsUtils {
       return {sidebarName, next: undefined, previous: undefined};
     }

-    const {previous, next} = getElementsAround(
-      navigationItems,
-      currentItemIndex,
-    );
-    return {sidebarName, previous, next};
+    return {
+      sidebarName,
+      previous: navigationItems[currentItemIndex - 1],
+      next: navigationItems[currentItemIndex + 1],
+    };
   }

   function getCategoryGeneratedIndexList(): SidebarItemCategoryWithGeneratedIndex[] {

@@ -268,11 +268,11 @@ export function createSidebarsUtils(sidebars: Sidebars): SidebarsUtils {
     const currentItemIndex = navigationItems.findIndex(
       isCurrentCategoryGeneratedIndexItem,
     );
-    const {previous, next} = getElementsAround(
-      navigationItems,
-      currentItemIndex,
-    );
-    return {sidebarName, previous, next};
+    return {
+      sidebarName,
+      previous: navigationItems[currentItemIndex - 1],
+      next: navigationItems[currentItemIndex + 1],
+    };
   }

   function checkSidebarsDocIds(validDocIds: string[], sidebarFilePath: string) {
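The refactor above drops `getElementsAround` in favor of direct indexing. A minimal sketch of why that is safe: out-of-range array access in JavaScript simply yields `undefined`, so no bounds check is needed (the `navigationItems` array here is illustrative):

```ts
const navigationItems = ['doc1', 'doc2', 'doc3'];

function getAround(currentItemIndex: number) {
  return {
    // Index -1 and index array.length are out of range, so these are
    // `undefined` at the first and last item respectively.
    previous: navigationItems[currentItemIndex - 1],
    next: navigationItems[currentItemIndex + 1],
  };
}

console.log(getAround(0)); // { previous: undefined, next: 'doc2' }
console.log(getAround(2)); // { previous: 'doc2', next: undefined }
```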
@@ -8,11 +8,12 @@
 /// <reference types="@docusaurus/module-type-aliases" />

 import type {Sidebars} from './sidebars/types';
-import type {Tag, FrontMatterTag} from '@docusaurus/utils';
 import type {
-  BrokenMarkdownLink as IBrokenMarkdownLink,
+  Tag,
+  FrontMatterTag,
+  BrokenMarkdownLink,
   ContentPaths,
-} from '@docusaurus/utils/lib/markdownLinks';
+} from '@docusaurus/utils';
 import type {VersionBanner} from '@docusaurus/plugin-content-docs';

 export type DocFile = {

@@ -133,11 +134,11 @@ export type LoadedContent = {
   loadedVersions: LoadedVersion[];
 };

-export type BrokenMarkdownLink = IBrokenMarkdownLink<VersionMetadata>;
+export type DocBrokenMarkdownLink = BrokenMarkdownLink<VersionMetadata>;

 export type DocsMarkdownOption = {
   versionsMetadata: VersionMetadata[];
   siteDir: string;
   sourceToPermalink: SourceToPermalink;
-  onBrokenMarkdownLink: (brokenMarkdownLink: BrokenMarkdownLink) => void;
+  onBrokenMarkdownLink: (brokenMarkdownLink: DocBrokenMarkdownLink) => void;
 };
@@ -898,7 +898,8 @@ declare module '@theme/TagsListByLetter' {
 }

 declare module '@theme/TagsListInline' {
-  export type Tag = Readonly<{label: string; permalink: string}>;
+  import type {Tag} from '@docusaurus/utils';
+
   export interface Props {
     readonly tags: readonly Tag[];
   }
@@ -20,6 +20,7 @@
   "dependencies": {
     "@docusaurus/logger": "2.0.0-beta.17",
+    "@docusaurus/utils": "2.0.0-beta.17",
     "js-yaml": "^4.1.0",
     "joi": "^17.6.0",
     "tslib": "^2.3.1"
   },
@@ -18,11 +18,13 @@ const JoiFrontMatterString: Joi.Extension = {
     return {value};
   },
 };

 /**
- * Enhance the default Joi.string() type so that it can convert number to
+ * Enhance the default `Joi.string()` type so that it can convert number to
  * strings. If user use front matter "tag: 2021", we shouldn't need to ask her
  * to write "tag: '2021'". Also yaml tries to convert patterns like "2019-01-01"
  * to dates automatically.
+ *
+ * @see https://github.com/facebook/docusaurus/issues/4642
  * @see https://github.com/sideway/joi/issues/1442#issuecomment-823997884
  */
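For context, a standalone sketch of the coercion idea this extension implements (not the actual `JoiFrontMatter` export; the extension shape is simplified): YAML parses front matter like `tag: 2021` as a number, and the extended string type converts it back before validation.

```ts
import Joi from 'joi';

// Extend Joi.string() so that numbers (and dates) coerce to strings instead
// of failing validation with "must be a string".
const CustomJoi = Joi.extend({
  type: 'string',
  base: Joi.string(),
  coerce(value) {
    return typeof value === 'number' || value instanceof Date
      ? {value: String(value)}
      : {value};
  },
});

console.log(CustomJoi.string().validate(2021).value); // "2021"
```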
@@ -7,8 +7,10 @@

 import type Joi from './Joi';
 import logger from '@docusaurus/logger';
+import Yaml from 'js-yaml';
 import {PluginIdSchema} from './validationSchemas';

+/** Print warnings returned from Joi validation. */
 export function printWarning(warning?: Joi.ValidationError): void {
   if (warning) {
     const warningMessages = warning.details

@@ -18,9 +20,14 @@ export function printWarning(warning?: Joi.ValidationError): void {
   }
 }

+/**
+ * The callback that should be used to validate plugin options. Handles plugin
+ * IDs on a generic level: no matter what the schema declares, this callback
+ * would require a string ID or default to "default".
+ */
 export function normalizePluginOptions<T extends {id?: string}>(
   schema: Joi.ObjectSchema<T>,
-  // This allows us to automatically normalize undefined to {id: 'default'}
+  // This allows us to automatically normalize undefined to { id: "default" }
   options: Partial<T> = {},
 ): T {
   // All plugins can be provided an "id" option (multi-instance support)

@@ -41,6 +48,10 @@ export function normalizePluginOptions<T extends {id?: string}>(
   return value;
 }

+/**
+ * The callback that should be used to validate theme config. No matter what the
+ * schema declares, this callback would allow unknown attributes.
+ */
 export function normalizeThemeConfig<T>(
   schema: Joi.ObjectSchema<T>,
   themeConfig: Partial<T>,

@@ -62,6 +73,9 @@ export function normalizeThemeConfig<T>(
   return value;
 }

+/**
+ * Validate front matter with better error message
+ */
 export function validateFrontMatter<T>(
   frontMatter: Record<string, unknown>,
   schema: Joi.ObjectSchema<T>,

@@ -75,13 +89,13 @@ export function validateFrontMatter<T>(
   printWarning(warning);

   if (error) {
-    const frontMatterString = JSON.stringify(frontMatter, null, 2);
     const errorDetails = error.details;
     const invalidFields = errorDetails.map(({path}) => path).join(', ');

     logger.error`The following front matter:
-${logger.yellow(frontMatterString)}
-contains invalid values for field(s): ${logger.yellow(invalidFields)}.
+---
+${Yaml.dump(frontMatter)}---
+contains invalid values for field(s): code=${invalidFields}.
 ${errorDetails.map(({message}) => message)}
 `;
     throw error;
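A usage sketch for the normalization callbacks documented above; the schema and the option names are made up for the example:

```ts
import {Joi, normalizePluginOptions} from '@docusaurus/utils-validation';

type MyPluginOptions = {id?: string; path: string};

const optionsSchema = Joi.object<MyPluginOptions>({
  path: Joi.string().default('docs'),
});

// Undefined options normalize to the schema defaults plus a "default" ID.
const options = normalizePluginOptions(optionsSchema, undefined);
console.log(options); // { path: 'docs', id: 'default' }
```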
@@ -103,7 +103,7 @@ describe('createAbsoluteFilePathMatcher', () => {
     expect(() =>
       matcher('/bad/path/myDoc.md'),
     ).toThrowErrorMatchingInlineSnapshot(
-      `"createAbsoluteFilePathMatcher unexpected error, absoluteFilePath=/bad/path/myDoc.md was not contained in any of the root folders [\\"/_root/docs\\",\\"/root/_docs/\\",\\"/__test__/website/src\\"]"`,
+      `"createAbsoluteFilePathMatcher unexpected error, absoluteFilePath=/bad/path/myDoc.md was not contained in any of the root folders: /_root/docs, /root/_docs/, /__test__/website/src"`,
     );
   });
 });
@@ -9,7 +9,6 @@ import {jest} from '@jest/globals';
 import {
   removeSuffix,
   removePrefix,
-  getElementsAround,
   mapAsyncSequential,
   findAsyncSequential,
   reportMessage,

@@ -38,40 +37,6 @@ describe('removePrefix', () => {
   });
 });

-describe('getElementsAround', () => {
-  it('returns elements around', () => {
-    expect(getElementsAround(['a', 'b', 'c', 'd'], 0)).toEqual({
-      previous: undefined,
-      next: 'b',
-    });
-    expect(getElementsAround(['a', 'b', 'c', 'd'], 1)).toEqual({
-      previous: 'a',
-      next: 'c',
-    });
-    expect(getElementsAround(['a', 'b', 'c', 'd'], 2)).toEqual({
-      previous: 'b',
-      next: 'd',
-    });
-    expect(getElementsAround(['a', 'b', 'c', 'd'], 3)).toEqual({
-      previous: 'c',
-      next: undefined,
-    });
-  });
-
-  it('throws if bad index is provided', () => {
-    expect(() =>
-      getElementsAround(['a', 'b', 'c', 'd'], -1),
-    ).toThrowErrorMatchingInlineSnapshot(
-      `"Valid \\"aroundIndex\\" for array (of size 4) are between 0 and 3, but you provided -1."`,
-    );
-    expect(() =>
-      getElementsAround(['a', 'b', 'c', 'd'], 4),
-    ).toThrowErrorMatchingInlineSnapshot(
-      `"Valid \\"aroundIndex\\" for array (of size 4) are between 0 and 3, but you provided 4."`,
-    );
-  });
-});
-
 describe('mapAsyncSequential', () => {
   function sleep(timeout: number): Promise<void> {
     return new Promise((resolve) => {
@@ -113,9 +113,13 @@ describe('createExcerpt', () => {
           import Component from '@site/src/components/Component'
           import './styles.css';

-          export function ItemCol(props) { return <Item {...props} className={'col col--6 margin-bottom--lg'}/> }
+          export function ItemCol(props) {
+            return <Item {...props} className={'col col--6 margin-bottom--lg'}/>
+          }

-          export function ItemCol(props) { return <Item {...props} className={'col col--6 margin-bottom--lg'}/> };
+          export function ItemCol(props) {
+            return <Item {...props} className={'col col--6 margin-bottom--lg'}/>
+          };

           Lorem **ipsum** dolor sit \`amet\`[^1], consectetur _adipiscing_ elit. [**Vestibulum**](https://wiktionary.org/wiki/vestibulum) ex urna[^note], ~~molestie~~ et sagittis ut, varius ac justo :wink:.

@@ -146,6 +150,18 @@ describe('createExcerpt', () => {
         `),
     ).toBe('Lorem ipsum dolor sit amet, consectetur adipiscing elit.');
   });
+
+  it('creates excerpt after multi-line imports', () => {
+    expect(
+      createExcerpt(dedent`
+          import React, {
+            type ReactNode,
+          } from 'react';
+
+          Lorem \`ipsum\` dolor sit amet, consectetur \`adipiscing elit\`.
+        `),
+    ).toBe('Lorem ipsum dolor sit amet, consectetur adipiscing elit.');
+  });
 });

 describe('parseMarkdownContentTitle', () => {
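A small usage sketch of the behavior the new test pins down: multi-line import statements are now skipped when computing the excerpt, and most Markdown syntax is stripped from the first contentful line.

```ts
import {createExcerpt} from '@docusaurus/utils';

const excerpt = createExcerpt(`import React, {
  type ReactNode,
} from 'react';

Lorem **ipsum** dolor sit \`amet\`.
`);

console.log(excerpt); // "Lorem ipsum dolor sit amet."
```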
@@ -5,62 +5,60 @@
  * LICENSE file in the root directory of this source tree.
  */

-import {
-  normalizeFrontMatterTag,
-  normalizeFrontMatterTags,
-  groupTaggedItems,
-  type Tag,
-} from '../tags';
+import {normalizeFrontMatterTags, groupTaggedItems, type Tag} from '../tags';

-describe('normalizeFrontMatterTag', () => {
-  type Input = Parameters<typeof normalizeFrontMatterTag>[1];
-  type Output = ReturnType<typeof normalizeFrontMatterTag>;
+describe('normalizeFrontMatterTags', () => {
   it('normalizes simple string tag', () => {
     const tagsPath = '/all/tags';
-    const input: Input = 'tag';
-    const expectedOutput: Output = {
+    const input = 'tag';
+    const expectedOutput = {
       label: 'tag',
       permalink: `${tagsPath}/tag`,
     };
-    expect(normalizeFrontMatterTag(tagsPath, input)).toEqual(expectedOutput);
+    expect(normalizeFrontMatterTags(tagsPath, [input])).toEqual([
+      expectedOutput,
+    ]);
   });

   it('normalizes complex string tag', () => {
     const tagsPath = '/all/tags';
-    const input: Input = 'some more Complex_tag';
-    const expectedOutput: Output = {
+    const input = 'some more Complex_tag';
+    const expectedOutput = {
       label: 'some more Complex_tag',
       permalink: `${tagsPath}/some-more-complex-tag`,
     };
-    expect(normalizeFrontMatterTag(tagsPath, input)).toEqual(expectedOutput);
+    expect(normalizeFrontMatterTags(tagsPath, [input])).toEqual([
+      expectedOutput,
+    ]);
   });

   it('normalizes simple object tag', () => {
     const tagsPath = '/all/tags';
-    const input: Input = {label: 'tag', permalink: 'tagPermalink'};
-    const expectedOutput: Output = {
+    const input = {label: 'tag', permalink: 'tagPermalink'};
+    const expectedOutput = {
       label: 'tag',
       permalink: `${tagsPath}/tagPermalink`,
     };
-    expect(normalizeFrontMatterTag(tagsPath, input)).toEqual(expectedOutput);
+    expect(normalizeFrontMatterTags(tagsPath, [input])).toEqual([
+      expectedOutput,
+    ]);
   });

   it('normalizes complex string tag with object tag', () => {
     const tagsPath = '/all/tags';
-    const input: Input = {
+    const input = {
       label: 'tag complex Label',
       permalink: '/MoreComplex/Permalink',
     };
-    const expectedOutput: Output = {
+    const expectedOutput = {
       label: 'tag complex Label',
       permalink: `${tagsPath}/MoreComplex/Permalink`,
     };
-    expect(normalizeFrontMatterTag(tagsPath, input)).toEqual(expectedOutput);
+    expect(normalizeFrontMatterTags(tagsPath, [input])).toEqual([
+      expectedOutput,
+    ]);
   });
 });

-describe('normalizeFrontMatterTags', () => {
-  type Input = Parameters<typeof normalizeFrontMatterTags>[1];
-  type Output = ReturnType<typeof normalizeFrontMatterTags>;
-
@@ -5,34 +5,86 @@
  * LICENSE file in the root directory of this source tree.
  */

+/** Node major version, directly read from env. */
 export const NODE_MAJOR_VERSION = parseInt(
   process.versions.node.split('.')[0]!,
   10,
 );
+/** Node minor version, directly read from env. */
 export const NODE_MINOR_VERSION = parseInt(
   process.versions.node.split('.')[1]!,
   10,
 );

-// Can be overridden with cli option --out-dir
+/**
+ * Can be overridden with cli option `--out-dir`. Code should generally use
+ * `context.outDir` instead (which is always absolute and localized).
+ */
 export const DEFAULT_BUILD_DIR_NAME = 'build';

-// Can be overridden with cli option --config
+/**
+ * Can be overridden with cli option `--config`. Code should generally use
+ * `context.siteConfigPath` instead (which is always absolute).
+ */
 export const DEFAULT_CONFIG_FILE_NAME = 'docusaurus.config.js';

+/** Can be absolute or relative to site directory. */
 export const BABEL_CONFIG_FILE_NAME =
-  process.env.DOCUSAURUS_BABEL_CONFIG_FILE_NAME || 'babel.config.js';
+  process.env.DOCUSAURUS_BABEL_CONFIG_FILE_NAME ?? 'babel.config.js';

+/**
+ * Can be absolute or relative to site directory. Code should generally use
+ * `context.generatedFilesDir` instead (which is always absolute).
+ */
 export const GENERATED_FILES_DIR_NAME =
-  process.env.DOCUSAURUS_GENERATED_FILES_DIR_NAME || '.docusaurus';
+  process.env.DOCUSAURUS_GENERATED_FILES_DIR_NAME ?? '.docusaurus';

+/**
+ * We would assume all of the site's JS code lives in here and not outside.
+ * Relative to the site directory.
+ */
 export const SRC_DIR_NAME = 'src';
-export const STATIC_DIR_NAME = 'static';
-export const OUTPUT_STATIC_ASSETS_DIR_NAME = 'assets'; // files handled by webpack, hashed (can be cached aggressively)

+/**
+ * Can be overridden with `config.staticDirectories`. Code should use
+ * `context.siteConfig.staticDirectories` instead (which is always absolute).
+ */
+export const DEFAULT_STATIC_DIR_NAME = 'static';
+
+/**
+ * Files here are handled by webpack, hashed (can be cached aggressively).
+ * Relative to the build output folder.
+ */
+export const OUTPUT_STATIC_ASSETS_DIR_NAME = 'assets';
+
+/**
+ * Components in this directory will receive the `@theme` alias and be able to
+ * shadow default theme components.
+ */
 export const THEME_PATH = `${SRC_DIR_NAME}/theme`;

+/**
+ * All translation-related data live here, relative to site directory. Content
+ * will be namespaced by locale.
+ */
+export const I18N_DIR_NAME = 'i18n';
+
+/**
+ * Translations for React code.
+ */
+export const CODE_TRANSLATIONS_FILE_NAME = 'code.json';
+
+/** Dev server opens on this port by default. */
 export const DEFAULT_PORT = 3000;

+/** Default plugin ID. */
 export const DEFAULT_PLUGIN_ID = 'default';

-// Temporary fix for https://github.com/facebook/docusaurus/issues/5493
+/**
+ * Allow overriding the limit after which the url loader will no longer inline
+ * assets.
+ *
+ * @see https://github.com/facebook/docusaurus/issues/5493
+ */
 export const WEBPACK_URL_LOADER_LIMIT =
   process.env.WEBPACK_URL_LOADER_LIMIT ?? 10000;
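The `||` to `??` changes above are behavioral, not cosmetic. A sketch of the difference: `||` also discards empty strings (and other falsy values), while `??` only falls back on `null` or `undefined`:

```ts
const emptyEnv = '';

console.log(emptyEnv || '.docusaurus'); // ".docusaurus" (empty string is falsy)
console.log(emptyEnv ?? '.docusaurus'); // "" (empty string is kept)
```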
@@ -13,15 +13,25 @@ import type {ContentPaths} from './markdownLinks';
 import logger from '@docusaurus/logger';

 type DataFileParams = {
+  /** Path to the potential data file, relative to `contentPaths` */
   filePath: string;
+  /**
+   * Includes the base path and localized path, both of which are eligible for
+   * sourcing data files. Both paths should be absolute.
+   */
   contentPaths: ContentPaths;
 };

+/**
+ * Looks for a data file in the potential content paths; loads a localized data
+ * file in priority.
+ *
+ * @returns An absolute path to the data file, or `undefined` if there isn't one.
+ */
 export async function getDataFilePath({
   filePath,
   contentPaths,
 }: DataFileParams): Promise<string | undefined> {
-  // Loads a localized data file in priority
   const contentPath = await findFolderContainingFile(
     getContentPathList(contentPaths),
     filePath,

@@ -33,11 +43,17 @@ export async function getDataFilePath({
 }

 /**
- * Looks up for a data file in the content paths, returns the normalized object.
- * Throws when validation fails; returns undefined when file not found
+ * Looks up for a data file in the content paths, returns the object validated
+ * and normalized according to the `validate` callback.
+ *
+ * @returns `undefined` when file not found
+ * @throws Throws when validation fails, displaying a helpful context message.
 */
 export async function getDataFileData<T>(
-  params: DataFileParams & {fileType: string},
+  params: DataFileParams & {
+    /** Used for the "The X file looks invalid" message. */
+    fileType: string;
+  },
   validate: (content: unknown) => T,
 ): Promise<T | undefined> {
   const filePath = await getDataFilePath(params);

@@ -54,12 +70,21 @@ export async function getDataFileData<T>(
   }
 }

-// Order matters: we look in priority in localized folder
+/**
+ * Takes the `contentPaths` data structure and returns an ordered path list
+ * indicating their priorities. For all data, we look in the localized folder
+ * in priority.
+ */
 export function getContentPathList(contentPaths: ContentPaths): string[] {
   return [contentPaths.contentPathLocalized, contentPaths.contentPath];
 }

-// return the first folder path in which the file exists in
+/**
+ * @param folderPaths a list of absolute paths.
+ * @param relativeFilePath file path relative to each `folderPaths`.
+ * @returns the first folder path in which the file exists, or `undefined` if
+ * none is found.
+ */
 export async function findFolderContainingFile(
   folderPaths: string[],
   relativeFilePath: string,

@@ -69,6 +94,16 @@ export async function findFolderContainingFile(
   );
 }

+/**
+ * Fail-fast alternative to `findFolderContainingFile`.
+ *
+ * @param folderPaths a list of absolute paths.
+ * @param relativeFilePath file path relative to each `folderPaths`.
+ * @returns the first folder path in which the file exists.
+ * @throws Throws if no file can be found. You should use this method only when
+ * you actually know the file exists (e.g. when the `relativeFilePath` is read
+ * with a glob and you are just trying to localize it)
+ */
 export async function getFolderContainingFile(
   folderPaths: string[],
   relativeFilePath: string,

@@ -77,12 +112,10 @@ export async function getFolderContainingFile(
     folderPaths,
     relativeFilePath,
   );
-  // should never happen, as the source was read from the FS anyway...
   if (!maybeFolderPath) {
     throw new Error(
-      `File "${relativeFilePath}" does not exist in any of these folders:\n- ${folderPaths.join(
-        '\n- ',
-      )}`,
+      `File "${relativeFilePath}" does not exist in any of these folders:
+- ${folderPaths.join('\n- ')}`,
     );
   }
   return maybeFolderPath;
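A usage sketch for `getDataFileData`, assuming a blog-like plugin sourcing an `authors.yml` file; the paths and the validation callback are illustrative (real callers typically pass a Joi-based validator):

```ts
import {getDataFileData, type ContentPaths} from '@docusaurus/utils';

const contentPaths: ContentPaths = {
  contentPath: '/site/blog',
  contentPathLocalized: '/site/i18n/fr/docusaurus-plugin-content-blog',
};

async function loadAuthors() {
  // Returns undefined if neither folder contains authors.yml; throws with a
  // contextual "looks invalid" message if the validate callback throws.
  return getDataFileData(
    {filePath: 'authors.yml', contentPaths, fileType: 'authors map'},
    (content) => content as {[name: string]: {name: string}},
  );
}
```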
@@ -13,6 +13,16 @@ import {findAsyncSequential} from './jsUtils';

 const fileHash = new Map<string, string>();

+/**
+ * Outputs a file to the generated files directory. Only writes files if content
+ * differs from cache (for hot reload performance).
+ *
+ * @param generatedFilesDir Absolute path.
+ * @param file Path relative to `generatedFilesDir`.
+ * @param content String content to write.
+ * @param skipCache If `true` (defaults as `true` for production), file is
+ * force-rewritten, skipping cache.
+ */
 export async function generate(
   generatedFilesDir: string,
   file: string,

@@ -23,14 +33,21 @@ export async function generate(

   if (skipCache) {
     await fs.outputFile(filepath, content);
+    // Cache still needs to be reset, otherwise, writing "A", "B", and "A" where
+    // "B" skips cache will cause the last "A" not be able to overwrite as the
+    // first "A" remains in cache. But if the file never existed in cache, no
+    // need to register it.
+    if (fileHash.get(filepath)) {
+      fileHash.set(filepath, createHash('md5').update(content).digest('hex'));
+    }
     return;
   }

   let lastHash = fileHash.get(filepath);

-  // If file already exists but its not in runtime cache yet,
-  // we try to calculate the content hash and then compare
-  // This is to avoid unnecessary overwriting and we can reuse old file.
+  // If file already exists but it's not in runtime cache yet, we try to
+  // calculate the content hash and then compare. This is to avoid unnecessary
+  // overwriting and we can reuse old file.
   if (!lastHash && (await fs.pathExists(filepath))) {
     const lastContent = await fs.readFile(filepath, 'utf8');
     lastHash = createHash('md5').update(lastContent).digest('hex');

@@ -45,7 +62,7 @@ export async function generate(
   }
 }

-const chunkNameCache = new Map();
+const chunkNameCache = new Map<string, string>();

 /**
  * Generate unique chunk name given a module path.

@@ -56,7 +73,7 @@ export function genChunkName(
   preferredName?: string,
   shortId: boolean = process.env.NODE_ENV === 'production',
 ): string {
-  let chunkName: string | undefined = chunkNameCache.get(modulePath);
+  let chunkName = chunkNameCache.get(modulePath);
   if (!chunkName) {
     if (shortId) {
       chunkName = simpleHash(modulePath, 8);

@@ -82,6 +99,8 @@ export function genChunkName(
  * @returns This returns a buffer, which you have to decode string yourself if
  * needed. (Not always necessary since the output isn't for human consumption
  * anyways, and most HTML manipulation libs accept buffers)
+ * @throws Throws when the HTML file is not found at any of the potential paths.
+ * This should never happen as it would lead to a 404.
  */
 export async function readOutputHTMLFile(
   permalink: string,
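A sketch of how a plugin typically calls `generate` (the directory and file names are illustrative); thanks to the hash cache documented above, repeated calls with identical content skip the disk write:

```ts
import {generate} from '@docusaurus/utils';

async function emitRoutes() {
  await generate(
    '/site/.docusaurus',
    'my-plugin/routes.json',
    JSON.stringify({routes: []}, null, 2),
  );
}
```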
@@ -8,23 +8,67 @@
 import path from 'path';
 import shell from 'shelljs';

+/** Custom error thrown when git is not found in `PATH`. */
 export class GitNotFoundError extends Error {}

+/** Custom error thrown when the current file is not tracked by git. */
 export class FileNotTrackedError extends Error {}

+/**
+ * Fetches the git history of a file and returns a relevant commit date.
+ * It gets the commit date instead of author date so that amended commits
+ * can have their dates updated.
+ *
+ * @throws {GitNotFoundError} If git is not found in `PATH`.
+ * @throws {FileNotTrackedError} If the current file is not tracked by git.
+ * @throws Also throws when `git log` exited with non-zero, or when it outputs
+ * unexpected text.
+ */
 export function getFileCommitDate(
+  /** Absolute path to the file. */
   file: string,
-  args: {age?: 'oldest' | 'newest'; includeAuthor?: false},
+  args: {
+    /**
+     * `"oldest"` is the commit that added the file, following renames;
+     * `"newest"` is the last commit that edited the file.
+     */
+    age?: 'oldest' | 'newest';
+    /** Use `includeAuthor: true` to get the author information as well. */
+    includeAuthor?: false;
+  },
 ): {
+  /** Relevant commit date. */
   date: Date;
+  /** Timestamp in **seconds**, as returned from git. */
   timestamp: number;
 };
+/**
+ * Fetches the git history of a file and returns a relevant commit date.
+ * It gets the commit date instead of author date so that amended commits
+ * can have their dates updated.
+ *
+ * @throws {GitNotFoundError} If git is not found in `PATH`.
+ * @throws {FileNotTrackedError} If the current file is not tracked by git.
+ * @throws Also throws when `git log` exited with non-zero, or when it outputs
+ * unexpected text.
+ */
 export function getFileCommitDate(
+  /** Absolute path to the file. */
   file: string,
-  args: {age?: 'oldest' | 'newest'; includeAuthor: true},
+  args: {
+    /**
+     * `"oldest"` is the commit that added the file, following renames;
+     * `"newest"` is the last commit that edited the file.
+     */
+    age?: 'oldest' | 'newest';
+    includeAuthor: true;
+  },
 ): {
+  /** Relevant commit date. */
   date: Date;
+  /** Timestamp in **seconds**, as returned from git. */
   timestamp: number;
+  /** The author's name, as returned from git. */
   author: string;
 };
 export function getFileCommitDate(

@@ -53,8 +97,6 @@ export function getFileCommitDate(
     );
   }

-  // Commit time and author name; not using author time so that amended commits
-  // can have their dates updated
   let formatArg = '--format=%ct';
   if (includeAuthor) {
     formatArg += ',%an';
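A usage sketch for the overloads above: with `includeAuthor: true` the result type gains `author`, and the two custom error classes can be handled separately (the file path is illustrative):

```ts
import {
  getFileCommitDate,
  GitNotFoundError,
  FileNotTrackedError,
} from '@docusaurus/utils';

try {
  const {date, timestamp, author} = getFileCommitDate('/site/docs/intro.md', {
    age: 'newest',
    includeAuthor: true,
  });
  console.log(date.toISOString(), timestamp, author);
} catch (err) {
  if (err instanceof GitNotFoundError) {
    console.warn('git binary not found in PATH');
  } else if (err instanceof FileNotTrackedError) {
    console.warn('file exists but is not tracked by git');
  } else {
    throw err;
  }
}
```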
@@ -10,24 +10,31 @@
 import Micromatch from 'micromatch'; // Note: Micromatch is used by Globby
 import path from 'path';

+/** A re-export of the globby instance. */
 export {default as Globby} from 'globby';

-// The default patterns we ignore when globbing
-// using _ prefix for exclusion by convention
+/**
+ * The default glob patterns we ignore when sourcing content.
+ * - Ignore files and folders starting with `_` recursively
+ * - Ignore tests
+ */
 export const GlobExcludeDefault = [
   // Ignore files starting with _
   '**/_*.{js,jsx,ts,tsx,md,mdx}',

   // Ignore folders starting with _ (including folder content)
   '**/_*/**',

   // Ignore tests
   '**/*.test.{js,jsx,ts,tsx}',
   '**/__tests__/**',
 ];

 type Matcher = (str: string) => boolean;

+/**
+ * A very thin wrapper around `Micromatch.makeRe`.
+ *
+ * @see {@link createAbsoluteFilePathMatcher}
+ * @param patterns A list of glob patterns.
+ * @returns A matcher handle that tells if a file path is matched by any of the
+ * patterns.
+ */
 export function createMatcher(patterns: string[]): Matcher {
   const regexp = new RegExp(
     patterns.map((pattern) => Micromatch.makeRe(pattern).source).join('|'),

@@ -35,10 +42,19 @@ export function createMatcher(patterns: string[]): Matcher {
   return (str) => regexp.test(str);
 }

-// We use match patterns like '**/_*/**',
-// This function permits to help to:
-// Match /user/sebastien/website/docs/_partials/xyz.md
-// Ignore /user/_sebastien/website/docs/partials/xyz.md
+/**
+ * We use match patterns like `"** /_* /**"` (ignore the spaces), where `"_*"`
+ * should only be matched within a subfolder. This function would:
+ * - Match `/user/sebastien/website/docs/_partials/xyz.md`
+ * - Ignore `/user/_sebastien/website/docs/partials/xyz.md`
+ *
+ * @param patterns A list of glob patterns.
+ * @param rootFolders A list of root folders to resolve the glob from.
+ * @returns A matcher handle that tells if a file path is matched by any of the
+ * patterns, resolved from the first root folder that contains the path.
+ * @throws Throws when the returned matcher receives a path that doesn't belong
+ * to any of the `rootFolders`.
+ */
 export function createAbsoluteFilePathMatcher(
   patterns: string[],
   rootFolders: string[],

@@ -51,8 +67,8 @@ export function createAbsoluteFilePathMatcher(
   );
   if (!rootFolder) {
     throw new Error(
-      `createAbsoluteFilePathMatcher unexpected error, absoluteFilePath=${absoluteFilePath} was not contained in any of the root folders ${JSON.stringify(
-        rootFolders,
+      `createAbsoluteFilePathMatcher unexpected error, absoluteFilePath=${absoluteFilePath} was not contained in any of the root folders: ${rootFolders.join(
+        ', ',
      )}`,
     );
   }
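A sketch of the matcher semantics described in the new JSDoc, with illustrative paths; a `_`-prefixed segment only counts when it sits below one of the root folders:

```ts
import {
  GlobExcludeDefault,
  createAbsoluteFilePathMatcher,
} from '@docusaurus/utils';

const matcher = createAbsoluteFilePathMatcher(GlobExcludeDefault, [
  '/user/sebastien/website/docs',
]);

console.log(matcher('/user/sebastien/website/docs/_partials/xyz.md')); // true
console.log(matcher('/user/sebastien/website/docs/intro.md')); // false
// Paths outside every root folder make the matcher throw.
```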
@@ -9,20 +9,21 @@ import {createHash} from 'crypto';
 import _ from 'lodash';
 import {shortName, isNameTooLong} from './pathUtils';

+/** Thin wrapper around `crypto.createHash("md5")`. */
 export function md5Hash(str: string): string {
   return createHash('md5').update(str).digest('hex');
 }

+/** Creates an MD5 hash and truncates it to the given length. */
 export function simpleHash(str: string, length: number): string {
-  return md5Hash(str).substr(0, length);
+  return md5Hash(str).substring(0, length);
 }

 // Based on https://github.com/gatsbyjs/gatsby/pull/21518/files
 /**
- * Given an input string, convert to kebab-case and append a hash.
- * Avoid str collision.
- * Also removes part of the string if its larger than the allowed
- * filename per OS. Avoids ERRNAMETOOLONG error.
+ * Given an input string, convert to kebab-case and append a hash, avoiding name
+ * collision. Also removes part of the string if its larger than the allowed
+ * filename per OS, avoiding `ERRNAMETOOLONG` error.
  */
 export function docuHash(str: string): string {
   if (str === '/') {
@@ -8,16 +8,21 @@
 import path from 'path';
 import _ from 'lodash';
 import type {TranslationFileContent, TranslationFile} from '@docusaurus/types';
-import {DEFAULT_PLUGIN_ID} from './constants';
+import {DEFAULT_PLUGIN_ID, I18N_DIR_NAME} from './constants';

+/**
+ * Takes a list of translation file contents, and shallow-merges them into one.
+ */
 export function mergeTranslations(
   contents: TranslationFileContent[],
 ): TranslationFileContent {
   return contents.reduce((acc, content) => ({...acc, ...content}), {});
 }

-// Useful to update all the messages of a translation file
-// Used in tests to simulate translations
+/**
+ * Useful to update all the messages of a translation file. Used in tests to
+ * simulate translations.
+ */
 export function updateTranslationFileMessages(
   translationFile: TranslationFile,
   updateMessage: (message: string) => string,

@@ -31,6 +36,10 @@ export function updateTranslationFileMessages(
   };
 }

+/**
+ * Takes everything needed and constructs a plugin i18n path. Plugins should
+ * expect everything it needs for translations to be found under this path.
+ */
 export function getPluginI18nPath({
   siteDir,
   locale,

@@ -46,7 +55,7 @@ export function getPluginI18nPath({
 }): string {
   return path.join(
     siteDir,
-    'i18n',
+    I18N_DIR_NAME,
     // namespace first by locale: convenient to work in a single folder for a
     // translator
     locale,
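A sketch of the path this helper produces, with illustrative arguments; locale comes first so a translator can work within a single folder:

```ts
import {getPluginI18nPath} from '@docusaurus/utils';

const i18nPath = getPluginI18nPath({
  siteDir: '/site',
  locale: 'fr',
  pluginName: 'docusaurus-plugin-content-blog',
  pluginId: 'default',
});
console.log(i18nPath); // roughly "/site/i18n/fr/docusaurus-plugin-content-blog"
```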
@@ -13,9 +13,11 @@ export {
   BABEL_CONFIG_FILE_NAME,
   GENERATED_FILES_DIR_NAME,
   SRC_DIR_NAME,
-  STATIC_DIR_NAME,
+  DEFAULT_STATIC_DIR_NAME,
   OUTPUT_STATIC_ASSETS_DIR_NAME,
   THEME_PATH,
+  I18N_DIR_NAME,
+  CODE_TRANSLATIONS_FILE_NAME,
   DEFAULT_PORT,
   DEFAULT_PLUGIN_ID,
   WEBPACK_URL_LOADER_LIMIT,

@@ -34,7 +36,6 @@ export {
 export {
   removeSuffix,
   removePrefix,
-  getElementsAround,
   mapAsyncSequential,
   findAsyncSequential,
   reportMessage,

@@ -56,8 +57,6 @@ export {
 export {
   type Tag,
   type FrontMatterTag,
-  type TaggedItemGroup,
-  normalizeFrontMatterTag,
   normalizeFrontMatterTags,
   groupTaggedItems,
 } from './tags';

@@ -73,8 +72,6 @@ export {
 export {
   type ContentPaths,
   type BrokenMarkdownLink,
-  type ReplaceMarkdownLinksParams,
-  type ReplaceMarkdownLinksReturn,
   replaceMarkdownLinks,
 } from './markdownLinks';
 export {type SluggerOptions, type Slugger, createSlugger} from './slugger';
@@ -8,36 +8,27 @@
 import type {ReportingSeverity} from '@docusaurus/types';
 import logger from '@docusaurus/logger';

+/** Removes a given string suffix from `str`. */
 export function removeSuffix(str: string, suffix: string): string {
   if (suffix === '') {
-    return str; // always returns "" otherwise!
+    // str.slice(0, 0) is ""
+    return str;
   }
   return str.endsWith(suffix) ? str.slice(0, -suffix.length) : str;
 }

+/** Removes a given string prefix from `str`. */
 export function removePrefix(str: string, prefix: string): string {
   return str.startsWith(prefix) ? str.slice(prefix.length) : str;
 }

-export function getElementsAround<T>(
-  array: T[],
-  aroundIndex: number,
-): {
-  next: T | undefined;
-  previous: T | undefined;
-} {
-  const min = 0;
-  const max = array.length - 1;
-  if (aroundIndex < min || aroundIndex > max) {
-    throw new Error(
-      `Valid "aroundIndex" for array (of size ${array.length}) are between ${min} and ${max}, but you provided ${aroundIndex}.`,
-    );
-  }
-  const previous = aroundIndex === min ? undefined : array[aroundIndex - 1];
-  const next = aroundIndex === max ? undefined : array[aroundIndex + 1];
-  return {previous, next};
-}
-
 /**
  * `Array#map` for async operations where order matters.
+ * @param array The array to traverse.
+ * @param action An async action to be performed on every array item. Will be
+ * awaited before working on the next.
+ * @returns The list of results returned from every `action(item)`
  */
 export async function mapAsyncSequential<T, R>(
   array: T[],
   action: (t: T) => Promise<R>,

@@ -50,6 +41,14 @@ export async function mapAsyncSequential<T, R>(
   return results;
 }

+/**
+ * `Array#find` for async operations where order matters.
+ * @param array The array to traverse.
+ * @param predicate An async predicate to be called on every array item. Should
+ * return a boolean indicating whether the currently element should be returned.
+ * @returns The function immediately returns the first item on which `predicate`
+ * returns `true`, or `undefined` if none matches the predicate.
+ */
 export async function findAsyncSequential<T>(
   array: T[],
   predicate: (t: T) => Promise<boolean>,

@@ -62,6 +61,21 @@ export async function findAsyncSequential<T>(
   return undefined;
 }

+/**
+ * Takes a message and reports it according to the severity that the user wants.
+ *
+ * - `ignore`: completely no-op
+ * - `log`: uses the `INFO` log level
+ * - `warn`: uses the `WARN` log level
+ * - `error`: uses the `ERROR` log level
+ * - `throw`: aborts the process, throws the error.
+ *
+ * Since the logger doesn't have logging level filters yet, these severities
+ * mostly just differ by their colors.
+ *
+ * @throws In addition to throwing when `reportingSeverity === "throw"`, this
+ * function also throws if `reportingSeverity` is not one of the above.
+ */
 export function reportMessage(
   message: string,
   reportingSeverity: ReportingSeverity,
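A sketch of the two sequential helpers documented above: unlike `Promise.all`, each item is awaited before the next one starts, which matters whenever ordering has side effects.

```ts
import {mapAsyncSequential, findAsyncSequential} from '@docusaurus/utils';

async function demo() {
  // Runs strictly in order: 1, then 2, then 3.
  const doubled = await mapAsyncSequential([1, 2, 3], async (n) => n * 2);
  console.log(doubled); // [2, 4, 6]

  // Stops at the first item whose predicate resolves to true.
  const firstEven = await findAsyncSequential(
    [1, 2, 3],
    async (n) => n % 2 === 0,
  );
  console.log(firstEven); // 2
}
```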
@@ -6,41 +6,79 @@
 */

 import path from 'path';
+import {getContentPathList} from './dataFileUtils';
 import {aliasedSitePath} from './pathUtils';

+/**
+ * Content plugins have a base path and a localized path to source content from.
+ * We will look into the localized path in priority.
+ */
 export type ContentPaths = {
+  /**
+   * The absolute path to the base content directory, like `"<siteDir>/docs"`.
+   */
   contentPath: string;
+  /**
+   * The absolute path to the localized content directory, like
+   * `"<siteDir>/i18n/zh-Hans/plugin-content-docs"`.
+   */
   contentPathLocalized: string;
 };

+/** Data structure representing each broken Markdown link to be reported. */
 export type BrokenMarkdownLink<T extends ContentPaths> = {
+  /** Absolute path to the file containing this link. */
   filePath: string;
+  /**
+   * This is generic because it may contain extra metadata like version name,
+   * which the reporter can provide for context.
+   */
   contentPaths: T;
+  /**
+   * The content of the link, like `"./brokenFile.md"`
+   */
   link: string;
 };

-export type ReplaceMarkdownLinksParams<T extends ContentPaths> = {
-  siteDir: string;
-  fileString: string;
-  filePath: string;
-  contentPaths: T;
-  sourceToPermalink: Record<string, string>;
-};
-
-export type ReplaceMarkdownLinksReturn<T extends ContentPaths> = {
-  newContent: string;
-  brokenMarkdownLinks: BrokenMarkdownLink<T>[];
-};
-
+/**
+ * Takes a Markdown file and replaces relative file references with their URL
+ * counterparts, e.g. `[link](./intro.md)` => `[link](/docs/intro)`, preserving
+ * everything else.
+ *
+ * This method uses best effort to find a matching file. The file reference can
+ * be relative to the directory of the current file (most likely) or any of the
+ * content paths (so `/tutorials/intro.md` can be resolved as
+ * `<siteDir>/docs/tutorials/intro.md`). Links that contain the `http(s):` or
+ * `@site/` prefix will always be ignored.
+ */
 export function replaceMarkdownLinks<T extends ContentPaths>({
   siteDir,
   fileString,
   filePath,
   contentPaths,
   sourceToPermalink,
-}: ReplaceMarkdownLinksParams<T>): ReplaceMarkdownLinksReturn<T> {
-  const {contentPath, contentPathLocalized} = contentPaths;
-
+}: {
+  /** Absolute path to the site directory, used to resolve aliased paths. */
+  siteDir: string;
+  /** The Markdown file content to be processed. */
+  fileString: string;
+  /** Absolute path to the current file containing `fileString`. */
+  filePath: string;
+  /** The content paths which the file reference may live in. */
+  contentPaths: T;
+  /**
+   * A map from source paths to their URLs. Source paths are `@site` aliased.
+   */
+  sourceToPermalink: Record<string, string>;
+}): {
+  /**
+   * The content with all Markdown file references replaced with their URLs.
+   * Unresolved links are left as-is.
+   */
+  newContent: string;
+  /** The list of broken links, */
+  brokenMarkdownLinks: BrokenMarkdownLink<T>[];
+} {
   const brokenMarkdownLinks: BrokenMarkdownLink<T>[] = [];

   // Replace internal markdown linking (except in fenced blocks).

@@ -64,9 +102,8 @@ export function replaceMarkdownLinks<T extends ContentPaths>({

     let modifiedLine = line;
     // Replace inline-style links or reference-style links e.g:
-    // This is [Document 1](doc1.md) -> we replace this doc1.md with correct
-    // link
-    // [doc1]: doc1.md -> we replace this doc1.md with correct link
+    // This is [Document 1](doc1.md)
+    // [doc1]: doc1.md
     const mdRegex =
       /(?:\]\(|\]:\s*)(?!https?:\/\/|@site\/)(?<filename>[^'")\]\s>]+\.mdx?)/g;
     let mdMatch = mdRegex.exec(modifiedLine);

@@ -75,10 +112,9 @@ export function replaceMarkdownLinks<T extends ContentPaths>({
       const mdLink = mdMatch.groups!.filename!;

       const sourcesToTry = [
-        path.resolve(path.dirname(filePath), decodeURIComponent(mdLink)),
-        `${contentPathLocalized}/${decodeURIComponent(mdLink)}`,
-        `${contentPath}/${decodeURIComponent(mdLink)}`,
-      ];
+        path.dirname(filePath),
+        ...getContentPathList(contentPaths),
+      ].map((p) => path.join(p, decodeURIComponent(mdLink)));

       const aliasedSourceMatch = sourcesToTry
         .map((source) => aliasedSitePath(source, siteDir))
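A usage sketch with illustrative paths: a relative `.md` reference resolves through `sourceToPermalink` (keyed by `@site`-aliased source paths) to its final URL.

```ts
import {replaceMarkdownLinks} from '@docusaurus/utils';

const {newContent, brokenMarkdownLinks} = replaceMarkdownLinks({
  siteDir: '/site',
  filePath: '/site/docs/intro.md',
  fileString: 'See [the tutorial](./tutorial.md)',
  contentPaths: {
    contentPath: '/site/docs',
    contentPathLocalized: '/site/i18n/en/docusaurus-plugin-content-docs',
  },
  sourceToPermalink: {'@site/docs/tutorial.md': '/docs/tutorial'},
});

console.log(newContent); // "See [the tutorial](/docs/tutorial)"
console.log(brokenMarkdownLinks); // []
```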
@@ -7,12 +7,25 @@

 import logger from '@docusaurus/logger';
 import matter from 'gray-matter';
-import {createSlugger, type Slugger} from './slugger';
+import {createSlugger, type Slugger, type SluggerOptions} from './slugger';

-// Input: ## Some heading {#some-heading}
-// Output: {text: "## Some heading", id: "some-heading"}
+// Some utilities for parsing Markdown content. These things are only used on
+// server-side when we infer metadata like `title` and `description` from the
+// content. Most parsing is still done in MDX through the mdx-loader.
+
+/**
+ * Parses custom ID from a heading. The ID must be composed of letters,
+ * underscores, and dashes only.
+ *
+ * @param heading e.g. `## Some heading {#some-heading}` where the last
+ * character must be `}` for the ID to be recognized
+ */
 export function parseMarkdownHeadingId(heading: string): {
+  /**
+   * The heading content sans the ID part, right-trimmed. e.g. `## Some heading`
+   */
   text: string;
+  /** The heading ID. e.g. `some-heading` */
   id?: string;
 } {
   const customHeadingIdRegex = /\s*\{#(?<id>[\w-]+)\}$/;

@@ -26,26 +39,40 @@ export function parseMarkdownHeadingId(heading: string): {
   return {text: heading, id: undefined};
 }

-// Hacky way of stripping out import statements from the excerpt
-// TODO: Find a better way to do so, possibly by compiling the Markdown content,
-// stripping out HTML tags and obtaining the first line.
+/**
+ * Creates an excerpt of a Markdown file. This function will:
+ *
+ * - Ignore h1 headings (setext or atx)
+ * - Ignore import/export
+ * - Ignore code blocks
+ *
+ * And for the first contentful line, it will strip away most Markdown
+ * syntax, including HTML tags, emphasis, links (keeping the text), etc.
+ */
 export function createExcerpt(fileString: string): string | undefined {
   const fileLines = fileString
-    .trimLeft()
+    .trimStart()
     // Remove Markdown alternate title
     .replace(/^[^\n]*\n[=]+/g, '')
     .split('\n');
   let inCode = false;
+  let inImport = false;
   let lastCodeFence = '';

   for (const fileLine of fileLines) {
+    if (fileLine === '' && inImport) {
+      inImport = false;
+    }
+
     // Skip empty line.
     if (!fileLine.trim()) {
       continue;
     }

     // Skip import/export declaration.
-    if (/^(?:import|export)\s.*/.test(fileLine)) {
+    if ((/^(?:import|export)\s.*/.test(fileLine) || inImport) && !inCode) {
+      inImport = true;
       continue;
     }

@@ -102,8 +129,22 @@ export function createExcerpt(fileString: string): string | undefined {
   return undefined;
 }

+/**
+ * Takes a raw Markdown file content, and parses the front matter using
+ * gray-matter. Worth noting that gray-matter accepts TOML and other markup
+ * languages as well.
+ *
+ * @throws Throws when gray-matter throws. e.g.:
+ * ```md
+ * ---
+ * foo: : bar
+ * ---
+ * ```
+ */
 export function parseFrontMatter(markdownFileContent: string): {
+  /** Front matter as parsed by gray-matter. */
   frontMatter: Record<string, unknown>;
+  /** The remaining content, trimmed. */
   content: string;
 } {
   const {data, content} = matter(markdownFileContent);

@@ -113,11 +154,6 @@ export function parseFrontMatter(markdownFileContent: string): {
   };
 }

-/**
- * Try to convert markdown heading to text. Does not need to be perfect, it is
- * only used as a fallback when frontMatter.title is not provided. For now, we
- * just unwrap possible inline code blocks (# `config.js`)
- */
 function toTextContentTitle(contentTitle: string): string {
   if (contentTitle.startsWith('`') && contentTitle.endsWith('`')) {
     return contentTitle.substring(1, contentTitle.length - 1);

@@ -125,10 +161,36 @@ function toTextContentTitle(contentTitle: string): string {
   return contentTitle;
 }

+type ParseMarkdownContentTitleOptions = {
+  /**
+   * If `true`, the matching title will be removed from the returned content.
+   * We can promise that at least one empty line will be left between the
+   * content before and after, but you shouldn't make too much assumption
+   * about what's left.
+   */
+  removeContentTitle?: boolean;
+};
+
+/**
+ * Takes the raw Markdown content, without front matter, and tries to find an h1
+ * title (setext or atx) to be used as metadata.
+ *
+ * It only searches until the first contentful paragraph, ignoring import/export
+ * declarations.
+ *
+ * It will try to convert markdown to reasonable text, but won't be best effort,
+ * since it's only used as a fallback when `frontMatter.title` is not provided.
+ * For now, we just unwrap inline code (``# `config.js` `` => `config.js`).
+ */
 export function parseMarkdownContentTitle(
   contentUntrimmed: string,
-  options?: {removeContentTitle?: boolean},
-): {content: string; contentTitle: string | undefined} {
+  options?: ParseMarkdownContentTitleOptions,
+): {
+  /** The content, optionally without the content title. */
+  content: string;
+  /** The title, trimmed and without the `#`. */
+  contentTitle: string | undefined;
+} {
   const removeContentTitleOption = options?.removeContentTitle ?? false;

   const content = contentUntrimmed.trim();

@@ -171,17 +233,28 @@ export function parseMarkdownContentTitle(
   };
 }

-type ParsedMarkdown = {
-  frontMatter: Record<string, unknown>;
-  content: string;
-  contentTitle: string | undefined;
-  excerpt: string | undefined;
-};
-
+/**
+ * Makes a full-round parse.
+ *
+ * @throws Throws when `parseFrontMatter` throws, usually because of invalid
+ * syntax.
+ */
 export function parseMarkdownString(
   markdownFileContent: string,
-  options?: {removeContentTitle?: boolean},
-): ParsedMarkdown {
+  options?: ParseMarkdownContentTitleOptions,
+): {
+  /** @see {@link parseFrontMatter} */
+  frontMatter: Record<string, unknown>;
+  /** @see {@link parseMarkdownContentTitle} */
+  contentTitle: string | undefined;
+  /** @see {@link createExcerpt} */
+  excerpt: string | undefined;
+  /**
+   * Content without front matter and (optionally) without title, depending on
+   * the `removeContentTitle` option.
+   */
+  content: string;
+} {
   try {
     const {frontMatter, content: contentWithoutFrontMatter} =
       parseFrontMatter(markdownFileContent);

@@ -229,11 +302,16 @@ function addHeadingId(
   return `${headingHashes}${headingText} {#${slug}}`;
 }

-export type WriteHeadingIDOptions = {
-  maintainCase?: boolean;
+export type WriteHeadingIDOptions = SluggerOptions & {
+  /** Overwrite existing heading IDs. */
   overwrite?: boolean;
 };

+/**
+ * Takes Markdown content, returns new content with heading IDs written.
+ * Respects existing IDs (unless `overwrite=true`) and never generates colliding
+ * IDs (through the slugger).
+ */
 export function writeMarkdownHeadingId(
   content: string,
   options: WriteHeadingIDOptions = {maintainCase: false, overwrite: false},
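A sketch of the full-round parse: front matter, content title, excerpt, and remaining content in one call.

```ts
import {parseMarkdownString} from '@docusaurus/utils';

const parsed = parseMarkdownString(`---
title: Hello
---

# Hello heading

Some **content** here.
`);

console.log(parsed.frontMatter); // { title: 'Hello' }
console.log(parsed.contentTitle); // "Hello heading"
console.log(parsed.excerpt); // "Some content here."
```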
@@ -24,7 +24,7 @@ export const isNameTooLong = (str: string): boolean =>
     ? str.length + SPACE_FOR_APPENDING > MAX_PATH_SEGMENT_CHARS // MacOS (APFS) and Windows (NTFS) filename length limit (255 chars)
     : Buffer.from(str).length + SPACE_FOR_APPENDING > MAX_PATH_SEGMENT_BYTES; // Other (255 bytes)

-export const shortName = (str: string): string => {
+export function shortName(str: string): string {
   if (isMacOs() || isWindows()) {
     const overflowingChars = str.length - MAX_PATH_SEGMENT_CHARS;
     return str.slice(

@@ -41,7 +41,7 @@ export const shortName = (str: string): string => {
       Buffer.byteLength(strBuffer) - overflowingBytes - SPACE_FOR_APPENDING - 1,
     )
     .toString();
-};
+}

 /**
  * Convert Windows backslash paths to posix style paths.
@@ -10,12 +10,24 @@ import GithubSlugger from 'github-slugger';
 // We create our own abstraction on top of the lib:
 // - unify usage everywhere in the codebase
 // - ability to add extra options
-export type SluggerOptions = {maintainCase?: boolean};
+export type SluggerOptions = {
+  /** Keep the headings' casing, otherwise make all lowercase. */
+  maintainCase?: boolean;
+};

 export type Slugger = {
+  /**
+   * Takes a Markdown heading like "Josh Cena" and sluggifies it according to
+   * GitHub semantics (in this case `josh-cena`). Stateful, because if you try
+   * to sluggify "Josh Cena" again it would return `josh-cena-1`.
+   */
   slug: (value: string, options?: SluggerOptions) => string;
 };

+/**
+ * A thin wrapper around github-slugger. This is a factory function that returns
+ * a stateful Slugger object.
+ */
 export function createSlugger(): Slugger {
   const githubSlugger = new GithubSlugger();
   return {
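A sketch of the statefulness the `Slugger` JSDoc describes:

```ts
import {createSlugger} from '@docusaurus/utils';

const slugger = createSlugger();
console.log(slugger.slug('Josh Cena')); // "josh-cena"
console.log(slugger.slug('Josh Cena')); // "josh-cena-1" (collision avoided)
```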
@ -10,12 +10,13 @@ import {normalizeUrl} from './urlUtils';
|
|||
|
||||
export type Tag = {
|
||||
label: string;
|
||||
/** Permalink to this tag's page, without the `/tags/` base path. */
|
||||
permalink: string;
|
||||
};
|
||||
|
||||
export type FrontMatterTag = string | Tag;
|
||||
|
||||
export function normalizeFrontMatterTag(
|
||||
function normalizeFrontMatterTag(
|
||||
tagsPath: string,
|
||||
frontMatterTag: FrontMatterTag,
|
||||
): Tag {
|
||||
|
@ -45,8 +46,19 @@ export function normalizeFrontMatterTag(
|
|||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Takes tag objects as they are defined in front matter, and normalizes each
|
||||
* into a standard tag object. The permalink is created by appending the
|
||||
* sluggified label to `tagsPath`. Front matter tags already containing
|
||||
* permalinks would still have `tagsPath` prepended.
|
||||
*
|
||||
* The result will always be unique by permalinks. The behavior with colliding
|
||||
* permalinks is undetermined.
|
||||
*/
|
||||
export function normalizeFrontMatterTags(
|
||||
/** Base path to append the tag permalinks to. */
|
||||
tagsPath: string,
|
||||
/** Can be `undefined`, so that we can directly pipe in `frontMatter.tags`. */
|
||||
frontMatterTags: FrontMatterTag[] | undefined = [],
|
||||
): Tag[] {
|
||||
const tags = frontMatterTags.map((tag) =>
|
||||
|
@ -56,29 +68,34 @@ export function normalizeFrontMatterTags(
  return _.uniqBy(tags, (tag) => tag.permalink);
}

export type TaggedItemGroup<Item> = {
type TaggedItemGroup<Item> = {
  tag: Tag;
  items: Item[];
};

/**
 * Permits to group docs/blogPosts by tag (provided by front matter)
 * Note: groups are indexed by permalink, because routes must be unique in the
 * end. Labels may vary on 2 md files but they are normalized. Docs with
 * label='some label' and label='some-label' should end-up in the same
 * group/page in the end. We can't create 2 routes /some-label because one would
 * override the other
 * Permits to group docs/blog posts by tag (provided by front matter).
 *
 * @returns a map from tag permalink to the items and other relevant tag data.
 * The record is indexed by permalink, because routes must be unique in the end.
 * Labels may vary on 2 MD files but they are normalized. Docs with
 * label='some label' and label='some-label' should end up in the same page.
 */
export function groupTaggedItems<Item>(
  items: readonly Item[],
  /**
   * A callback telling me how to get the tags list of the current item. Usually
   * simply getting it from some metadata of the current item.
   */
  getItemTags: (item: Item) => readonly Tag[],
): Record<string, TaggedItemGroup<Item>> {
  const result: Record<string, TaggedItemGroup<Item>> = {};
): {[permalink: string]: TaggedItemGroup<Item>} {
  const result: {[permalink: string]: TaggedItemGroup<Item>} = {};

  function handleItemTag(item: Item, tag: Tag) {
  items.forEach((item) => {
    getItemTags(item).forEach((tag) => {
      // Init missing tag groups
      // TODO: it's not really clear what should be the behavior if 2 items have
      // the same tag but the permalink is different for each
      // TODO: it's not really clear what should be the behavior if 2 tags have
      // the same permalink but the label is different for each
      // For now, the first tag found wins
      result[tag.permalink] ??= {
        tag,

@ -87,11 +104,6 @@ export function groupTaggedItems<Item>(

      // Add item to group
      result[tag.permalink]!.items.push(item);
  }

  items.forEach((item) => {
    getItemTags(item).forEach((tag) => {
      handleItemTag(item, tag);
    });
  });

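A hypothetical call site for the inlined traversal above; the `metadata.tags` shape is assumed purely for illustration:

const groups = groupTaggedItems(blogPosts, (post) => post.metadata.tags);
// groups['/blog/tags/release']?.items would hold every post tagged "release"
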
@ -8,6 +8,18 @@

import {removeSuffix} from './jsUtils';
import resolvePathnameUnsafe from 'resolve-pathname';

/**
 * Much like `path.join`, but much better. Takes an array of URL segments, and
 * joins them into a reasonable URL.
 *
 * - `["file:", "/home", "/user/", "website"]` => `file:///home/user/website`
 * - `["file://", "home", "/user/", "website"]` => `file://home/user/website` (relative!)
 * - Remove trailing slash before parameters or hash.
 * - Replace `?` in query parameters with `&`.
 * - Dedupe forward slashes in the entire path, avoiding protocol slashes.
 *
 * @throws {TypeError} If any of the URL segments is not a string, this throws.
 */
export function normalizeUrl(rawUrls: string[]): string {
  const urls = [...rawUrls];
  const resultArray = [];

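Two illustrative calls matching the documented behavior (the first mirrors the JSDoc example, not part of this diff):

normalizeUrl(['file:', '/home', '/user/', 'website']); // 'file:///home/user/website'
normalizeUrl(['https://example.com/', '/docs/', '/intro']); // 'https://example.com/docs/intro'
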
@ -75,8 +87,8 @@ export function normalizeUrl(rawUrls: string[]): string {
  }

  let str = resultArray.join('/');
  // Each input component is now separated by a single slash
  // except the possible first plain protocol part.
  // Each input component is now separated by a single slash except the possible
  // first plain protocol part.

  // Remove trailing slash before parameters or hash.
  str = str.replace(/\/(?<search>\?|&|#[^!])/g, '$1');

@ -94,6 +106,11 @@ export function normalizeUrl(rawUrls: string[]): string {
  return str;
}

/**
 * Takes a file's path, relative to its content folder, and computes its edit
 * URL. If `editUrl` is `undefined`, this returns `undefined`, as is the case
 * when the user doesn't want an edit URL in her config.
 */
export function getEditUrl(
  fileRelativePath: string,
  editUrl?: string,

@ -105,8 +122,8 @@ export function getEditUrl(
}

/**
 * Convert filepath to url path.
 * Example: 'index.md' -> '/', 'foo/bar.js' -> '/foo/bar',
 * Converts file path to a reasonable URL path, e.g. `'index.md'` -> `'/'`,
 * `'foo/bar.js'` -> `'/foo/bar'`
 */
export function fileToPath(file: string): string {
  const indexRE = /(?<dirname>^|.*\/)index\.(?:mdx?|jsx?|tsx?)$/i;

@ -118,6 +135,13 @@ export function fileToPath(file: string): string {
  return `/${file.replace(extRE, '').replace(/\\/g, '/')}`;
}

/**
 * Similar to `encodeURI`, but uses `encodeURIComponent` and assumes there's no
 * query.
 *
 * `encodeURI("/question?/answer#section")` => `"/question?/answer#section"`;
 * `encodePath("/question?/answer#section")` => `"/question%3F/answer%23section"`
 */
export function encodePath(userPath: string): string {
  return userPath
    .split('/')

@ -125,6 +149,10 @@ export function encodePath(userPath: string): string {
  .join('/');
}

/**
 * Whether `str` is a valid pathname. It must be absolute, and not contain
 * special characters.
 */
export function isValidPathname(str: string): boolean {
  if (!str.startsWith('/')) {
    return false;

@ -138,22 +166,31 @@ export function isValidPathname(str: string): boolean {
  }
}

// resolve pathname and fail fast if resolution fails
/**
 * Resolve pathnames and fail-fast if resolution fails. Uses standard URL
 * semantics (provided by `resolve-pathname`, which is used internally by React
 * router).
 */
export function resolvePathname(to: string, from?: string): string {
  return resolvePathnameUnsafe(to, from);
}

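A quick sketch of the standard URL semantics mentioned above (illustrative, not part of this diff):

resolvePathname('../intro', '/docs/advanced/'); // '/docs/intro'
resolvePathname('intro', '/docs/advanced'); // '/docs/intro'
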
/** Appends a leading slash to `str`, if one doesn't exist. */
export function addLeadingSlash(str: string): string {
  return str.startsWith('/') ? str : `/${str}`;
}

// TODO deduplicate: also present in @docusaurus/utils-common
/** Appends a trailing slash to `str`, if one doesn't exist. */
export function addTrailingSlash(str: string): string {
  return str.endsWith('/') ? str : `${str}/`;
}

/** Removes the trailing slash from `str`. */
export function removeTrailingSlash(str: string): string {
  return removeSuffix(str, '/');
}

/** Constructs an SSH URL that can be used to push to GitHub. */
export function buildSshUrl(
  githubHost: string,
  organizationName: string,

@ -166,6 +203,7 @@ export function buildSshUrl(
  return `git@${githubHost}:${organizationName}/${projectName}.git`;
}

/** Constructs an HTTP URL that can be used to push to GitHub. */
export function buildHttpsUrl(
  gitCredentials: string,
  githubHost: string,

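For reference, the shapes these helpers produce, per the return statements visible in this diff (values illustrative):

buildSshUrl('github.com', 'facebook', 'docusaurus');
// 'git@github.com:facebook/docusaurus.git'
buildHttpsUrl('user:token', 'github.com', 'facebook', 'docusaurus');
// 'https://user:token@github.com/facebook/docusaurus.git'
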
@ -179,6 +217,11 @@ export function buildHttpsUrl(
  return `https://${gitCredentials}@${githubHost}/${organizationName}/${projectName}.git`;
}

/**
 * Whether the current URL is an SSH protocol. In addition to looking for
 * `ssh:`, it will also allow protocol-less URLs like
 * `git@github.com:facebook/docusaurus.git`.
 */
export function hasSSHProtocol(sourceRepoUrl: string): boolean {
  try {
    if (new URL(sourceRepoUrl).protocol === 'ssh:') {

@ -187,6 +230,6 @@ export function hasSSHProtocol(sourceRepoUrl: string): boolean {
    return false;
  } catch {
    // Fails when there isn't a protocol
    return /^(?:[\w-]+@)?[\w.-]+:[\w./-]+/.test(sourceRepoUrl); // git@github.com:facebook/docusaurus.git
    return /^(?:[\w-]+@)?[\w.-]+:[\w./-]+/.test(sourceRepoUrl);
  }
}

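The documented behavior, illustrated (not part of this diff):

hasSSHProtocol('ssh://git@github.com/facebook/docusaurus.git'); // true (explicit ssh: protocol)
hasSSHProtocol('git@github.com:facebook/docusaurus.git'); // true (protocol-less, matched by the regex fallback)
hasSSHProtocol('https://github.com/facebook/docusaurus.git'); // false
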
@ -31,7 +31,11 @@ type FileLoaderUtils = {
  };
};

// Inspired by https://github.com/gatsbyjs/gatsby/blob/8e6e021014da310b9cc7d02e58c9b3efe938c665/packages/gatsby/src/utils/webpack-utils.ts#L447
/**
 * Returns unified loader configurations to be used for various file types.
 *
 * Inspired by https://github.com/gatsbyjs/gatsby/blob/8e6e021014da310b9cc7d02e58c9b3efe938c665/packages/gatsby/src/utils/webpack-utils.ts#L447
 */
export function getFileLoaderUtils(): FileLoaderUtils {
  // files/images < urlLoaderLimit will be inlined as base64 strings directly in
  // the html

@ -39,7 +43,11 @@ export function getFileLoaderUtils(): FileLoaderUtils {

  // defines the path/pattern of the assets handled by webpack
  const fileLoaderFileName = (folder: AssetFolder) =>
    `${OUTPUT_STATIC_ASSETS_DIR_NAME}/${folder}/[name]-[contenthash].[ext]`;
    path.posix.join(
      OUTPUT_STATIC_ASSETS_DIR_NAME,
      folder,
      '[name]-[contenthash].[ext]',
    );

  const loaders: FileLoaderUtils['loaders'] = {
    file: (options: {folder: AssetFolder}) => ({

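Note that `path.posix.join` always joins with forward slashes and normalizes stray separators, so the emitted webpack pattern stays URL-safe on every platform. A minimal sketch, assuming OUTPUT_STATIC_ASSETS_DIR_NAME is 'assets' (an assumption, not confirmed by this diff):

import path from 'path';

path.posix.join('assets', 'images', '[name]-[contenthash].[ext]');
// => 'assets/images/[name]-[contenthash].[ext]', even on Windows
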
@ -6,7 +6,10 @@
 */

import type {DocusaurusConfig, I18nConfig} from '@docusaurus/types';
import {DEFAULT_CONFIG_FILE_NAME, STATIC_DIR_NAME} from '@docusaurus/utils';
import {
  DEFAULT_CONFIG_FILE_NAME,
  DEFAULT_STATIC_DIR_NAME,
} from '@docusaurus/utils';
import {Joi, URISchema, printWarning} from '@docusaurus/utils-validation';

const DEFAULT_I18N_LOCALE = 'en';

@ -53,7 +56,7 @@ export const DEFAULT_CONFIG: Pick<
  noIndex: false,
  tagline: '',
  baseUrlIssueBanner: true,
  staticDirectories: [STATIC_DIR_NAME],
  staticDirectories: [DEFAULT_STATIC_DIR_NAME],
};

function createPluginSchema(theme: boolean) {

@ -14,7 +14,12 @@ import type {
  TranslationMessage,
  InitializedPlugin,
} from '@docusaurus/types';
import {getPluginI18nPath, toMessageRelativeFilePath} from '@docusaurus/utils';
import {
  getPluginI18nPath,
  toMessageRelativeFilePath,
  I18N_DIR_NAME,
  CODE_TRANSLATIONS_FILE_NAME,
} from '@docusaurus/utils';
import {Joi} from '@docusaurus/utils-validation';
import logger from '@docusaurus/logger';

@ -140,7 +145,7 @@ Maybe you should remove them? ${unknownKeys}`;

// should we make this configurable?
function getTranslationsDirPath(context: TranslationContext): string {
  return path.resolve(path.join(context.siteDir, `i18n`));
  return path.resolve(path.join(context.siteDir, I18N_DIR_NAME));
}
export function getTranslationsLocaleDirPath(
  context: TranslationContext,

@ -149,7 +154,10 @@ export function getTranslationsLocaleDirPath(
}

function getCodeTranslationsFilePath(context: TranslationContext): string {
  return path.join(getTranslationsLocaleDirPath(context), 'code.json');
  return path.join(
    getTranslationsLocaleDirPath(context),
    CODE_TRANSLATIONS_FILE_NAME,
  );
}

export async function readCodeTranslationFileContent(

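Putting the i18n path helpers together: assuming I18N_DIR_NAME is 'i18n' and CODE_TRANSLATIONS_FILE_NAME is 'code.json' (which the removed literals suggest), a site at /website with locale 'fr' would plausibly resolve to paths like:

// getTranslationsDirPath(context)        => '/website/i18n'
// getTranslationsLocaleDirPath(context)  => '/website/i18n/fr'
// getCodeTranslationsFilePath(context)   => '/website/i18n/fr/code.json'
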
@ -256,9 +256,13 @@ sebastienlorber
sensical
serializers
setaf
setext
sida
simen
slorber
sluggified
sluggifies
sluggify
spâce
stackblitz
stackblitzrc