// Mirror of https://github.com/facebook/docusaurus.git
// Synced 2025-05-02 19:57:25 +02:00
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */
import path from 'path';
|
|
import matter from 'gray-matter';
|
|
import {createHash} from 'crypto';
|
|
import camelCase from 'lodash.camelcase';
|
|
import kebabCase from 'lodash.kebabcase';
|
|
import escapeStringRegexp from 'escape-string-regexp';
|
|
import fs from 'fs-extra';
|
|
import {URL} from 'url';
|
|
|
|
const fileHash = new Map();
|
|
export async function generate(
|
|
generatedFilesDir: string,
|
|
file: string,
|
|
content: any,
|
|
skipCache: boolean = process.env.NODE_ENV === 'production',
|
|
): Promise<void> {
|
|
const filepath = path.join(generatedFilesDir, file);
|
|
|
|
if (skipCache) {
|
|
await fs.ensureDir(path.dirname(filepath));
|
|
await fs.writeFile(filepath, content);
|
|
return;
|
|
}
|
|
|
|
let lastHash = fileHash.get(filepath);
|
|
|
|
// If file already exists but its not in runtime cache yet,
|
|
// we try to calculate the content hash and then compare
|
|
// This is to avoid unnecessary overwriting and we can reuse old file.
|
|
if (!lastHash && fs.existsSync(filepath)) {
|
|
const lastContent = await fs.readFile(filepath, 'utf8');
|
|
lastHash = createHash('md5').update(lastContent).digest('hex');
|
|
fileHash.set(filepath, lastHash);
|
|
}
|
|
|
|
const currentHash = createHash('md5').update(content).digest('hex');
|
|
|
|
if (lastHash !== currentHash) {
|
|
await fs.ensureDir(path.dirname(filepath));
|
|
await fs.writeFile(filepath, content);
|
|
fileHash.set(filepath, currentHash);
|
|
}
|
|
}
|
|
|
|
export function objectWithKeySorted(obj: {[index: string]: any}) {
|
|
// https://github.com/lodash/lodash/issues/1459#issuecomment-460941233
|
|
return Object.keys(obj)
|
|
.sort()
|
|
.reduce((acc: any, key: string) => {
|
|
acc[key] = obj[key];
|
|
return acc;
|
|
}, {});
|
|
}
|
|
|
|
const indexRE = /(^|.*\/)index\.(md|js|jsx|ts|tsx)$/i;
|
|
const extRE = /\.(md|js|tsx)$/;
|
|
|
|
/**
|
|
* Convert filepath to url path.
|
|
* Example: 'index.md' -> '/', 'foo/bar.js' -> '/foo/bar',
|
|
*/
|
|
export function fileToPath(file: string): string {
|
|
if (indexRE.test(file)) {
|
|
return file.replace(indexRE, '/$1');
|
|
}
|
|
return `/${file.replace(extRE, '').replace(/\\/g, '/')}`;
|
|
}
|
|
|
|
export function encodePath(userpath: string): string {
|
|
return userpath
|
|
.split('/')
|
|
.map((item) => encodeURIComponent(item))
|
|
.join('/');
|
|
}
|
|
|
|
/**
|
|
* Given an input string, convert to kebab-case and append a hash.
|
|
* Avoid str collision.
|
|
*/
|
|
export function docuHash(str: string): string {
|
|
if (str === '/') {
|
|
return 'index';
|
|
}
|
|
const shortHash = createHash('md5').update(str).digest('hex').substr(0, 3);
|
|
return `${kebabCase(str)}-${shortHash}`;
|
|
}
|
|
|
|
/**
|
|
* Convert first string character to the upper case.
|
|
* E.g: docusaurus -> Docusaurus
|
|
*/
|
|
export function upperFirst(str: string): string {
|
|
return str ? str.charAt(0).toUpperCase() + str.slice(1) : '';
|
|
}
|
|
|
|
/**
|
|
* Generate unique React Component Name.
|
|
* E.g: /foo-bar -> FooBar096
|
|
*/
|
|
export function genComponentName(pagePath: string): string {
|
|
if (pagePath === '/') {
|
|
return 'index';
|
|
}
|
|
const pageHash = docuHash(pagePath);
|
|
return upperFirst(camelCase(pageHash));
|
|
}
|
|
|
|
/**
|
|
* Convert Windows backslash paths to posix style paths.
|
|
* E.g: endi\\lie -> endi/lie
|
|
*/
|
|
export function posixPath(str: string): string {
|
|
const isExtendedLengthPath = /^\\\\\?\\/.test(str);
|
|
const hasNonAscii = /[^\u0000-\u0080]+/.test(str); // eslint-disable-line
|
|
|
|
if (isExtendedLengthPath || hasNonAscii) {
|
|
return str;
|
|
}
|
|
return str.replace(/\\/g, '/');
|
|
}
|
|
|
|
const chunkNameCache = new Map();
|
|
/**
|
|
* Generate unique chunk name given a module path.
|
|
*/
|
|
export function genChunkName(
|
|
modulePath: string,
|
|
prefix?: string,
|
|
preferredName?: string,
|
|
shortId: boolean = process.env.NODE_ENV === 'production',
|
|
): string {
|
|
let chunkName: string | undefined = chunkNameCache.get(modulePath);
|
|
if (!chunkName) {
|
|
if (shortId) {
|
|
chunkName = createHash('md5')
|
|
.update(modulePath)
|
|
.digest('hex')
|
|
.substr(0, 8);
|
|
} else {
|
|
let str = modulePath;
|
|
if (preferredName) {
|
|
const shortHash = createHash('md5')
|
|
.update(modulePath)
|
|
.digest('hex')
|
|
.substr(0, 3);
|
|
str = `${preferredName}${shortHash}`;
|
|
}
|
|
const name = str === '/' ? 'index' : docuHash(str);
|
|
chunkName = prefix ? `${prefix}---${name}` : name;
|
|
}
|
|
chunkNameCache.set(modulePath, chunkName);
|
|
}
|
|
return chunkName;
|
|
}
|
|
|
|
export function idx(target: any, keyPaths?: string | (string | number)[]): any {
|
|
return (
|
|
target &&
|
|
keyPaths &&
|
|
(Array.isArray(keyPaths)
|
|
? keyPaths.reduce((obj, key) => obj && obj[key], target)
|
|
: target[keyPaths])
|
|
);
|
|
}
|
|
|
|
/**
|
|
* Given a filepath and dirpath, get the first directory.
|
|
*/
|
|
export function getSubFolder(file: string, refDir: string): string | null {
|
|
const separator = escapeStringRegexp(path.sep);
|
|
const baseDir = escapeStringRegexp(path.basename(refDir));
|
|
const regexSubFolder = new RegExp(
|
|
`${baseDir}${separator}(.*?)${separator}.*`,
|
|
);
|
|
const match = regexSubFolder.exec(file);
|
|
return match && match[1];
|
|
}
|
|
|
|
// Regex for an import statement.
|
|
const importRegexString = '^(.*import){1}(.+){0,1}\\s[\'"](.+)[\'"];?';
|
|
|
|
export function createExcerpt(fileString: string): string | undefined {
|
|
let fileContent = fileString.trimLeft();
|
|
|
|
if (RegExp(importRegexString).test(fileContent)) {
|
|
fileContent = fileContent
|
|
.replace(RegExp(importRegexString, 'gm'), '')
|
|
.trimLeft();
|
|
}
|
|
|
|
const fileLines = fileContent.split('\n');
|
|
|
|
for (const fileLine of fileLines) {
|
|
const cleanedLine = fileLine
|
|
// Remove HTML tags.
|
|
.replace(/<[^>]*>/g, '')
|
|
// Remove ATX-style headers.
|
|
.replace(/^\#{1,6}\s*([^#]*)\s*(\#{1,6})?/gm, '$1')
|
|
// Remove emphasis and strikethroughs.
|
|
.replace(/([\*_~]{1,3})(\S.*?\S{0,1})\1/g, '$2')
|
|
// Remove images.
|
|
.replace(/\!\[(.*?)\][\[\(].*?[\]\)]/g, '$1')
|
|
// Remove footnotes.
|
|
.replace(/\[\^.+?\](\: .*?$)?/g, '')
|
|
// Remove inline links.
|
|
.replace(/\[(.*?)\][\[\(].*?[\]\)]/g, '$1')
|
|
// Remove inline code.
|
|
.replace(/`(.+?)`/g, '$1')
|
|
// Remove blockquotes.
|
|
.replace(/^\s{0,3}>\s?/g, '')
|
|
// Remove admonition definition.
|
|
.replace(/(:{3}.*)/, '')
|
|
// Remove Emoji names within colons include preceding whitespace.
|
|
.replace(/\s?(:(::|[^:\n])+:)/g, '')
|
|
.trim();
|
|
|
|
if (cleanedLine) {
|
|
return cleanedLine;
|
|
}
|
|
}
|
|
|
|
return undefined;
|
|
}
|
|
|
|
type ParsedMarkdown = {
|
|
frontMatter: {
|
|
[key: string]: any;
|
|
};
|
|
content: string;
|
|
excerpt: string | undefined;
|
|
};
|
|
export function parseMarkdownString(markdownString: string): ParsedMarkdown {
|
|
const options: {} = {
|
|
excerpt: (file: matter.GrayMatterFile<string>): void => {
|
|
// Hacky way of stripping out import statements from the excerpt
|
|
// TODO: Find a better way to do so, possibly by compiling the Markdown content,
|
|
// stripping out HTML tags and obtaining the first line.
|
|
file.excerpt = createExcerpt(file.content);
|
|
},
|
|
};
|
|
|
|
try {
|
|
const {data: frontMatter, content, excerpt} = matter(
|
|
markdownString,
|
|
options,
|
|
);
|
|
return {frontMatter, content, excerpt};
|
|
} catch (e) {
|
|
throw new Error(`Error while parsing markdown front matter.
|
|
This can happen if you use special characteres like : in frontmatter values (try using "" around that value)
|
|
${e.message}`);
|
|
}
|
|
}
|
|
|
|
export async function parseMarkdownFile(
|
|
source: string,
|
|
): Promise<ParsedMarkdown> {
|
|
const markdownString = await fs.readFile(source, 'utf-8');
|
|
try {
|
|
return parseMarkdownString(markdownString);
|
|
} catch (e) {
|
|
throw new Error(
|
|
`Error while parsing markdown file ${source}
|
|
${e.message}`,
|
|
);
|
|
}
|
|
}
|
|
|
|
export function normalizeUrl(rawUrls: string[]): string {
|
|
const urls = rawUrls;
|
|
const resultArray = [];
|
|
|
|
// If the first part is a plain protocol, we combine it with the next part.
|
|
if (urls[0].match(/^[^/:]+:\/*$/) && urls.length > 1) {
|
|
const first = urls.shift();
|
|
urls[0] = first + urls[0];
|
|
}
|
|
|
|
// There must be two or three slashes in the file protocol,
|
|
// two slashes in anything else.
|
|
const replacement = urls[0].match(/^file:\/\/\//) ? '$1:///' : '$1://';
|
|
urls[0] = urls[0].replace(/^([^/:]+):\/*/, replacement);
|
|
|
|
// eslint-disable-next-line
|
|
for (let i = 0; i < urls.length; i++) {
|
|
let component = urls[i];
|
|
|
|
if (typeof component !== 'string') {
|
|
throw new TypeError(`Url must be a string. Received ${typeof component}`);
|
|
}
|
|
|
|
if (component === '') {
|
|
// eslint-disable-next-line
|
|
continue;
|
|
}
|
|
|
|
if (i > 0) {
|
|
// Removing the starting slashes for each component but the first.
|
|
component = component.replace(/^[/]+/, '');
|
|
}
|
|
|
|
// Removing the ending slashes for each component but the last.
|
|
// For the last component we will combine multiple slashes to a single one.
|
|
component = component.replace(/[/]+$/, i < urls.length - 1 ? '' : '/');
|
|
|
|
resultArray.push(component);
|
|
}
|
|
|
|
let str = resultArray.join('/');
|
|
// Each input component is now separated by a single slash
|
|
// except the possible first plain protocol part.
|
|
|
|
// Remove trailing slash before parameters or hash.
|
|
str = str.replace(/\/(\?|&|#[^!])/g, '$1');
|
|
|
|
// Replace ? in parameters with &.
|
|
const parts = str.split('?');
|
|
str = parts.shift() + (parts.length > 0 ? '?' : '') + parts.join('&');
|
|
|
|
// Dedupe forward slashes in the entire path, avoiding protocol slashes.
|
|
str = str.replace(/([^:]\/)\/+/g, '$1');
|
|
|
|
// Dedupe forward slashes at the beginning of the path.
|
|
str = str.replace(/^\/+/g, '/');
|
|
|
|
return str;
|
|
}
|
|
|
|
/**
|
|
* Alias filepath relative to site directory, very useful so that we
|
|
* don't expose user's site structure.
|
|
* Example: some/path/to/website/docs/foo.md -> @site/docs/foo.md
|
|
*/
|
|
export function aliasedSitePath(filePath: string, siteDir: string): string {
|
|
const relativePath = path.relative(siteDir, filePath);
|
|
// Cannot use path.join() as it resolves '../' and removes
|
|
// the '@site'. Let webpack loader resolve it.
|
|
return `@site/${relativePath}`;
|
|
}
|
|
|
|
export function getEditUrl(
|
|
fileRelativePath: string,
|
|
editUrl?: string,
|
|
): string | undefined {
|
|
return editUrl
|
|
? normalizeUrl([editUrl, posixPath(fileRelativePath)])
|
|
: undefined;
|
|
}
|
|
|
|
export function isValidPathname(str: string): boolean {
|
|
if (!str.startsWith('/')) {
|
|
return false;
|
|
}
|
|
try {
|
|
return new URL(str, 'https://domain.com').pathname === str;
|
|
} catch (e) {
|
|
return false;
|
|
}
|
|
}
|
|
|
|
export function addTrailingSlash(str: string): string {
|
|
return str.endsWith('/') ? str : `${str}/`;
|
|
}
|
|
|
|
export function removeTrailingSlash(str: string): string {
|
|
return removeSuffix(str, '/');
|
|
}
|
|
|
|
export function removeSuffix(str: string, suffix: string): string {
|
|
if (suffix === '') {
|
|
return str; // always returns "" otherwise!
|
|
}
|
|
return str.endsWith(suffix) ? str.slice(0, -suffix.length) : str;
|
|
}
|
|
|
|
export function getFilePathForRoutePath(routePath: string): string {
|
|
const fileName = path.basename(routePath);
|
|
const filePath = path.dirname(routePath);
|
|
return path.join(filePath, `${fileName}/index.html`);
|
|
}
|