chore: clean up ESLint config, enable a few rules (#6514)

* chore: clean up ESLint config, enable a few rules

* enable max-len for comments

* fix build
Joshua Chen 2022-01-31 10:31:24 +08:00 committed by GitHub
parent b8ccb869f1
commit aa446b7a9c
167 changed files with 1157 additions and 960 deletions


@@ -75,7 +75,8 @@ describe('load utils', () => {
genChunkName('path/is/similar', 'newPrefix'),
);
// Even with same preferred name, still different chunk name for different path
// Even with same preferred name, still different chunk name for
// different path
const secondAssert: Record<string, string> = {
'/blog/1': 'blog-85-f-089',
'/blog/2': 'blog-353-489',
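
The behavior the comment above pins down reads well as a tiny sketch. Assumptions: genChunkName is in scope from the module under test, and the second argument mirrors the call shown in the hunk; the assertion is the point, not any particular hash value.

const a = genChunkName('path/is/similar', 'newPrefix');
const b = genChunkName('path/is/different', 'newPrefix');
console.log(a !== b); // true: the module path itself feeds the generated chunk name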


@@ -37,7 +37,8 @@ describe('createExcerpt', () => {
Nunc porttitor libero nec vulputate venenatis. Nam nec rhoncus mauris. Morbi tempus est et nibh maximus, tempus venenatis arcu lobortis.
`),
).toEqual(
// h1 title is skipped on purpose, because we don't want the page to have SEO metadata title === description
// h1 title is skipped on purpose, because we don't want the page to have
// SEO metadata title === description
'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum ex urna, molestie et sagittis ut, varius ac justo.',
);
});
@@ -54,7 +55,8 @@ describe('createExcerpt', () => {
Nunc porttitor libero nec vulputate venenatis. Nam nec rhoncus mauris. Morbi tempus est et nibh maximus, tempus venenatis arcu lobortis.
`),
).toEqual(
// h1 title is skipped on purpose, because we don't want the page to have SEO metadata title === description
// h1 title is skipped on purpose, because we don't want the page to have
// SEO metadata title === description
'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum ex urna, molestie et sagittis ut, varius ac justo.',
);
});
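
The intent captured in the repeated comment above is easiest to see in a small call. A minimal sketch, assuming createExcerpt is imported from the utils package these tests exercise:

import {createExcerpt} from '@docusaurus/utils'; // assumed export location

const excerpt = createExcerpt(`
# My Title

Lorem ipsum dolor sit amet, consectetur adipiscing elit.
`);
// The leading h1 is skipped so the excerpt (used as the SEO description)
// never duplicates the page title:
console.log(excerpt); // expected: "Lorem ipsum dolor sit amet, consectetur adipiscing elit."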


@@ -70,7 +70,8 @@ describe('shortName', () => {
:
{
apfs: '字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字',
// This is pretty bad (a character clipped in half), but I doubt if it ever happens
// This is pretty bad (a character clipped in half), but I doubt if it
// ever happens
xfs: '字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字字<E5AD97>',
},
};


@@ -200,7 +200,8 @@ export function getPluginI18nPath({
return path.join(
siteDir,
'i18n',
// namespace first by locale: convenient to work in a single folder for a translator
// namespace first by locale: convenient to work in a single folder for a
// translator
locale,
// Make it convenient to use for single-instance
// ie: return "docs", not "docs-default" nor "docs/default"
@@ -212,7 +213,8 @@ export function getPluginI18nPath({
/**
* @param permalink The URL that the HTML file corresponds to, without base URL
* @param outDir Full path to the output directory
* @param trailingSlash The site config option. If provided, only one path will be read.
* @param trailingSlash The site config option. If provided, only one path will
* be read.
* @returns This returns a buffer, which you have to decode string yourself if
* needed. (Not always necessary since the output isn't for human consumption
* anyways, and most HTML manipulation libs accept buffers)
@@ -231,18 +233,17 @@ export async function readOutputHTMLFile(
return fs.readFile(withTrailingSlashPath);
} else if (trailingSlash === false) {
return fs.readFile(withoutTrailingSlashPath);
} else {
const HTMLPath = await findAsyncSequential(
[withTrailingSlashPath, withoutTrailingSlashPath],
fs.pathExists,
);
if (!HTMLPath) {
throw new Error(
`Expected output HTML file to be found at ${withTrailingSlashPath}`,
);
}
return fs.readFile(HTMLPath);
}
const HTMLPath = await findAsyncSequential(
[withTrailingSlashPath, withoutTrailingSlashPath],
fs.pathExists,
);
if (!HTMLPath) {
throw new Error(
`Expected output HTML file to be found at ${withTrailingSlashPath}`,
);
}
return fs.readFile(HTMLPath);
}
export async function mapAsyncSequential<T, R>(
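
A usage sketch for the trailingSlash contract documented above. The parameter order (permalink, outDir, trailingSlash) follows the JSDoc; the concrete file names in the comment are illustrative:

async function readIntroPage() {
  // trailingSlash undefined: probe both candidate outputs in order
  // (e.g. "build/docs/intro/index.html", then "build/docs/intro.html");
  // passing true or false reads exactly one of the two.
  const buf = await readOutputHTMLFile('/docs/intro', 'build', undefined);
  return buf.toString('utf-8'); // a Buffer comes back; decode only if needed
}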


@@ -51,7 +51,8 @@ export function replaceMarkdownLinks<T extends ContentPaths>({
if (!fencedBlock) {
fencedBlock = true;
[lastCodeFence] = line.trim().match(/^`+/)!;
// If we are in a ````-fenced block, all ``` would be plain text instead of fences
// If we are in a ````-fenced block, all ``` would be plain text instead
// of fences
} else if (line.trim().match(/^`+/)![0].length >= lastCodeFence.length) {
fencedBlock = false;
}
@@ -62,7 +63,8 @@ export function replaceMarkdownLinks<T extends ContentPaths>({
let modifiedLine = line;
// Replace inline-style links or reference-style links e.g:
// This is [Document 1](doc1.md) -> we replace this doc1.md with correct link
// This is [Document 1](doc1.md) -> we replace this doc1.md with correct
// link
// [doc1]: doc1.md -> we replace this doc1.md with correct link
const mdRegex =
/(?:(?:\]\()|(?:\]:\s?))(?!https?:\/\/|@site\/)([^'")\]\s>]+\.mdx?)/g;
@@ -86,7 +88,8 @@ export function replaceMarkdownLinks<T extends ContentPaths>({
: undefined;
if (permalink) {
// MDX won't be happy if the permalink contains a space, we need to convert it to %20
// MDX won't be happy if the permalink contains a space, we need to
// convert it to %20
const encodedPermalink = permalink
.split('/')
.map((part) => part.replace(/\s/g, '%20'))
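
The space-escaping step above is self-contained enough to show on its own. A sketch with a hypothetical helper name (the real code inlines this logic inside replaceMarkdownLinks):

// Only spaces inside each segment are encoded; the separating slashes stay.
function encodePermalinkSpaces(permalink: string): string {
  return permalink
    .split('/')
    .map((part) => part.replace(/\s/g, '%20'))
    .join('/');
}

console.log(encodePermalinkSpaces('/docs/my page')); // "/docs/my%20page"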


@@ -21,9 +21,8 @@ export function parseMarkdownHeadingId(heading: string): {
text: matches[1],
id: matches[2],
};
} else {
return {text: heading, id: undefined};
}
return {text: heading, id: undefined};
}
// Hacky way of stripping out import statements from the excerpt
@@ -56,7 +55,8 @@ export function createExcerpt(fileString: string): string | undefined {
if (!inCode) {
inCode = true;
[lastCodeFence] = fileLine.trim().match(/^`+/)!;
// If we are in a ````-fenced block, all ``` would be plain text instead of fences
// If we are in a ````-fenced block, all ``` would be plain text instead
// of fences
} else if (
fileLine.trim().match(/^`+/)![0].length >= lastCodeFence.length
) {
@@ -113,9 +113,11 @@ export function parseFrontMatter(markdownFileContent: string): {
};
}
// Try to convert markdown heading as text
// Does not need to be perfect, it is only used as a fallback when frontMatter.title is not provided
// For now, we just unwrap possible inline code blocks (# `config.js`)
/**
* Try to convert markdown heading to text. Does not need to be perfect, it is
* only used as a fallback when frontMatter.title is not provided. For now, we
* just unwrap possible inline code blocks (# `config.js`)
*/
function toTextContentTitle(contentTitle: string): string {
if (contentTitle.startsWith('`') && contentTitle.endsWith('`')) {
return contentTitle.substring(1, contentTitle.length - 1);
@@ -152,15 +154,14 @@ export function parseMarkdownContentTitle(
if (!pattern || !title) {
return {content, contentTitle: undefined};
} else {
const newContent = removeContentTitleOption
? content.replace(pattern, '')
: content;
return {
content: newContent.trim(),
contentTitle: toTextContentTitle(title.trim()).trim(),
};
}
const newContent = removeContentTitleOption
? content.replace(pattern, '')
: content;
return {
content: newContent.trim(),
contentTitle: toTextContentTitle(title.trim()).trim(),
};
}
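
A quick illustration of the helpers visible above; the return shapes are the editor's reading of the code, not values copied from tests:

// parseMarkdownHeadingId: an explicit {#id} suffix is split off; otherwise
// id stays undefined (that fallback branch is in the first hunk above).
parseMarkdownHeadingId('Install {#install}'); // {text: 'Install', id: 'install'}
parseMarkdownHeadingId('Install'); // {text: 'Install', id: undefined}

// toTextContentTitle (not exported; shown only to illustrate the fallback):
// unwrap a title that is a single inline code span.
toTextContentTitle('`config.js`'); // "config.js"
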
type ParsedMarkdown = {


@@ -9,7 +9,8 @@
import path from 'path';
// MacOS (APFS) and Windows (NTFS) filename length limit = 255 chars, Others = 255 bytes
// MacOS (APFS) and Windows (NTFS) filename length limit = 255 chars,
// Others = 255 bytes
const MAX_PATH_SEGMENT_CHARS = 255;
const MAX_PATH_SEGMENT_BYTES = 255;
// Space for appending things to the string like file extensions and so on
@@ -19,7 +20,7 @@ const isMacOs = () => process.platform === 'darwin';
const isWindows = () => process.platform === 'win32';
export const isNameTooLong = (str: string): boolean =>
// This is actually not entirely correct: we can't assume FS from OS. But good enough?
// Not entirely correct: we can't assume FS from OS. But good enough?
isMacOs() || isWindows()
? str.length + SPACE_FOR_APPENDING > MAX_PATH_SEGMENT_CHARS // MacOS (APFS) and Windows (NTFS) filename length limit (255 chars)
: Buffer.from(str).length + SPACE_FOR_APPENDING > MAX_PATH_SEGMENT_BYTES; // Other (255 bytes)
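
The two limits above differ in unit (characters vs bytes), which is exactly what the 字-heavy fixture in the earlier test exercises. A minimal sketch of the distinction, assuming isNameTooLong is in scope:

const name = '字'.repeat(100);
console.log(name.length); // 100 characters
console.log(Buffer.from(name).length); // 300 bytes in UTF-8
console.log(isNameTooLong(name)); // platform-dependent, per the comment above
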
@@ -56,7 +57,8 @@ export const shortName = (str: string): string => {
export function posixPath(str: string): string {
const isExtendedLengthPath = /^\\\\\?\\/.test(str);
// Forward slashes are only valid Windows paths when they don't contain non-ascii characters.
// Forward slashes are only valid Windows paths when they don't contain non-
// ascii characters.
// eslint-disable-next-line no-control-regex
const hasNonAscii = /[^\u0000-\u0080]+/.test(str);
@@ -66,13 +68,18 @@ export function posixPath(str: string): string {
return str.replace(/\\/g, '/');
}
// When you want to display a path in a message/warning/error,
// it's more convenient to:
// - make it relative to cwd()
// - convert to posix (ie not using windows \ path separator)
// This way, Jest tests can run more reliably on any computer/CI
// on both Unix/Windows
// For Windows users this is not perfect (as they see / instead of \) but it's probably good enough
/**
* When you want to display a path in a message/warning/error, it's more
* convenient to:
*
* - make it relative to `cwd()`
* - convert to posix (ie not using windows \ path separator)
*
* This way, Jest tests can run more reliably on any computer/CI on both
* Unix/Windows
* For Windows users this is not perfect (as they see / instead of \) but it's
* probably good enough
*/
export function toMessageRelativeFilePath(filePath: string): string {
return posixPath(path.relative(process.cwd(), filePath));
}
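
A small usage sketch for the two path helpers above; the cwd value is hypothetical:

// posixPath: normalize Windows separators so messages and snapshots agree.
posixPath('docs\\intro.md'); // "docs/intro.md"

// toMessageRelativeFilePath: cwd-relative plus posix, for readable logs.
// Assuming process.cwd() === '/home/user/website':
toMessageRelativeFilePath('/home/user/website/docs/intro.md'); // "docs/intro.md"
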
@@ -92,7 +99,8 @@ export function aliasedSitePath(filePath: string, siteDir: string): string {
/**
* When you have a path like C:\X\Y
* It is not safe to use directly when generating code
* For example, this would fail due to unescaped \: `<img src={require('${filePath}')} />`
* For example, this would fail due to unescaped \:
* `<img src={require('${filePath}')} />`
* But this would work: `<img src={require('${escapePath(filePath)}')} />`
*
* posixPath can't be used in all cases, because forward slashes are only valid


@@ -28,9 +28,9 @@ export function normalizeFrontMatterTag(
// TODO maybe make ensure the permalink is valid url path?
function normalizeTagPermalink(permalink: string): string {
// note: we always apply tagsPath on purpose
// for versioned docs, v1/doc.md and v2/doc.md tags with custom permalinks don't lead to the same created page
// tagsPath is different for each doc version
// note: we always apply tagsPath on purpose. For versioned docs, v1/doc.md
// and v2/doc.md tags with custom permalinks don't lead to the same created
// page. tagsPath is different for each doc version
return normalizeUrl([tagsPath, permalink]);
}
@@ -61,11 +61,14 @@ export type TaggedItemGroup<Item> = {
items: Item[];
};
// Permits to group docs/blogPosts by tag (provided by FrontMatter)
// Note: groups are indexed by permalink, because routes must be unique in the end
// Labels may vary on 2 md files but they are normalized.
// Docs with label='some label' and label='some-label' should end-up in the same group/page in the end
// We can't create 2 routes /some-label because one would override the other
/**
* Permits to group docs/blogPosts by tag (provided by front matter)
* Note: groups are indexed by permalink, because routes must be unique in the
* end. Labels may vary on 2 md files but they are normalized. Docs with
* label='some label' and label='some-label' should end-up in the same
* group/page in the end. We can't create 2 routes /some-label because one would
* override the other
*/
export function groupTaggedItems<Item>(
items: Item[],
getItemTags: (item: Item) => Tag[],
@@ -74,7 +77,8 @@ export function groupTaggedItems<Item>(
function handleItemTag(item: Item, tag: Tag) {
// Init missing tag groups
// TODO: it's not really clear what should be the behavior if 2 items have the same tag but the permalink is different for each
// TODO: it's not really clear what should be the behavior if 2 items have
// the same tag but the permalink is different for each
// For now, the first tag found wins
result[tag.permalink] = result[tag.permalink] ?? {
tag,
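
The grouping contract described above (groups indexed by permalink, first tag object wins) can be sketched independently of the real Tag type. The names below are illustrative, not the actual API:

type MiniTag = {label: string; permalink: string};

function groupByPermalink<Item>(
  items: Item[],
  getItemTags: (item: Item) => MiniTag[],
): Record<string, {tag: MiniTag; items: Item[]}> {
  const result: Record<string, {tag: MiniTag; items: Item[]}> = {};
  items.forEach((item) => {
    getItemTags(item).forEach((tag) => {
      // The first tag object seen for a permalink wins, as the TODO notes.
      result[tag.permalink] = result[tag.permalink] ?? {tag, items: []};
      result[tag.permalink].items.push(item);
    });
  });
  return result;
}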


@@ -16,7 +16,8 @@ export function normalizeUrl(rawUrls: string[]): string {
if (urls[0].match(/^[^/:]+:\/*$/) && urls.length > 1) {
const first = urls.shift();
if (first!.startsWith('file:') && urls[0].startsWith('/')) {
// Force a double slash here, else we lose the information that the next segment is an absolute path
// Force a double slash here, else we lose the information that the next
// segment is an absolute path
urls[0] = `${first}//${urls[0]}`;
} else {
urls[0] = first + urls[0];
@@ -48,14 +49,15 @@ export function normalizeUrl(rawUrls: string[]): string {
// Removing the starting slashes for each component but the first.
component = component.replace(
/^[/]+/,
// Special case where the first element of rawUrls is empty ["", "/hello"] => /hello
// Special case where the first element of rawUrls is empty
// ["", "/hello"] => /hello
component[0] === '/' && !hasStartingSlash ? '/' : '',
);
}
hasEndingSlash = component[component.length - 1] === '/';
// Removing the ending slashes for each component but the last.
// For the last component we will combine multiple slashes to a single one.
// Removing the ending slashes for each component but the last. For the
// last component we will combine multiple slashes to a single one.
component = component.replace(/[/]+$/, i < urls.length - 1 ? '' : '/');
}
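
A few illustrative calls for the slash handling above. The first output is the editor's reading of the code; the second restates the special case quoted in the first hunk:

normalizeUrl(['https://example.com/', '/docs/', '/intro']); // "https://example.com/docs/intro"
normalizeUrl(['', '/hello']); // "/hello" (empty first element, single leading slash)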


@@ -33,7 +33,8 @@ type FileLoaderUtils = {
// Inspired by https://github.com/gatsbyjs/gatsby/blob/8e6e021014da310b9cc7d02e58c9b3efe938c665/packages/gatsby/src/utils/webpack-utils.ts#L447
export function getFileLoaderUtils(): FileLoaderUtils {
// files/images < urlLoaderLimit will be inlined as base64 strings directly in the html
// files/images < urlLoaderLimit will be inlined as base64 strings directly in
// the html
const urlLoaderLimit = WEBPACK_URL_LOADER_LIMIT;
// defines the path/pattern of the assets handled by webpack
@@ -56,7 +57,7 @@ export function getFileLoaderUtils(): FileLoaderUtils {
},
}),
// TODO find a better solution to avoid conflicts with the ideal-image plugin
// TODO avoid conflicts with the ideal-image plugin
// TODO this may require a little breaking change for ideal-image users?
// Maybe with the ideal image plugin, all md images should be "ideal"?
// This is used to force url-loader+file-loader on markdown images
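
For readers unfamiliar with the url-loader limit mentioned above, the utility ends up producing rules of roughly this shape. A sketch only: the option names are standard url-loader/file-loader options, while the numbers and output pattern are illustrative rather than the exact Docusaurus config:

const imageRule = {
  test: /\.(?:png|jpe?g|gif)$/i,
  use: [
    {
      loader: 'url-loader',
      options: {
        // Files below this many bytes are inlined as base64 data URIs in the
        // emitted bundle; larger files fall back to file-loader and are
        // copied out with a hashed file name.
        limit: 10000,
        fallback: 'file-loader',
        name: 'assets/images/[name]-[hash].[ext]',
      },
    },
  ],
};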