Website Structure
This commit is contained in:
parent
62812f2090
commit
71f0676a62
22365 changed files with 4265753 additions and 791 deletions
8
Frontend-Learner/node_modules/diff/libesm/convert/dmp.d.ts
generated
vendored
Normal file
8
Frontend-Learner/node_modules/diff/libesm/convert/dmp.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
import type { ChangeObject } from '../types.js';
|
||||
type DmpOperation = 1 | 0 | -1;
|
||||
/**
|
||||
* converts a list of change objects to the format returned by Google's [diff-match-patch](https://github.com/google/diff-match-patch) library
|
||||
*/
|
||||
export declare function convertChangesToDMP<ValueT>(changes: ChangeObject<ValueT>[]): [DmpOperation, ValueT][];
|
||||
export {};
|
||||
//# sourceMappingURL=dmp.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/convert/dmp.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/convert/dmp.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"dmp.d.ts","sourceRoot":"","sources":["../../src/convert/dmp.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,aAAa,CAAC;AAE9C,KAAK,YAAY,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC;AAE/B;;GAEG;AACH,wBAAgB,mBAAmB,CAAC,MAAM,EAAE,OAAO,EAAE,YAAY,CAAC,MAAM,CAAC,EAAE,GAAG,CAAC,YAAY,EAAE,MAAM,CAAC,EAAE,CAiBrG"}
|
||||
21
Frontend-Learner/node_modules/diff/libesm/convert/dmp.js
generated
vendored
Normal file
21
Frontend-Learner/node_modules/diff/libesm/convert/dmp.js
generated
vendored
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
/**
|
||||
* converts a list of change objects to the format returned by Google's [diff-match-patch](https://github.com/google/diff-match-patch) library
|
||||
*/
|
||||
export function convertChangesToDMP(changes) {
|
||||
const ret = [];
|
||||
let change, operation;
|
||||
for (let i = 0; i < changes.length; i++) {
|
||||
change = changes[i];
|
||||
if (change.added) {
|
||||
operation = 1;
|
||||
}
|
||||
else if (change.removed) {
|
||||
operation = -1;
|
||||
}
|
||||
else {
|
||||
operation = 0;
|
||||
}
|
||||
ret.push([operation, change.value]);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
6
Frontend-Learner/node_modules/diff/libesm/convert/xml.d.ts
generated
vendored
Normal file
6
Frontend-Learner/node_modules/diff/libesm/convert/xml.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
import type { ChangeObject } from '../types.js';
|
||||
/**
|
||||
* converts a list of change objects to a serialized XML format
|
||||
*/
|
||||
export declare function convertChangesToXML(changes: ChangeObject<string>[]): string;
|
||||
//# sourceMappingURL=xml.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/convert/xml.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/convert/xml.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"xml.d.ts","sourceRoot":"","sources":["../../src/convert/xml.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,aAAa,CAAC;AAE9C;;GAEG;AACH,wBAAgB,mBAAmB,CAAC,OAAO,EAAE,YAAY,CAAC,MAAM,CAAC,EAAE,GAAG,MAAM,CAmB3E"}
|
||||
31
Frontend-Learner/node_modules/diff/libesm/convert/xml.js
generated
vendored
Normal file
31
Frontend-Learner/node_modules/diff/libesm/convert/xml.js
generated
vendored
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
/**
|
||||
* converts a list of change objects to a serialized XML format
|
||||
*/
|
||||
export function convertChangesToXML(changes) {
|
||||
const ret = [];
|
||||
for (let i = 0; i < changes.length; i++) {
|
||||
const change = changes[i];
|
||||
if (change.added) {
|
||||
ret.push('<ins>');
|
||||
}
|
||||
else if (change.removed) {
|
||||
ret.push('<del>');
|
||||
}
|
||||
ret.push(escapeHTML(change.value));
|
||||
if (change.added) {
|
||||
ret.push('</ins>');
|
||||
}
|
||||
else if (change.removed) {
|
||||
ret.push('</del>');
|
||||
}
|
||||
}
|
||||
return ret.join('');
|
||||
}
|
||||
function escapeHTML(s) {
|
||||
let n = s;
|
||||
n = n.replace(/&/g, '&');
|
||||
n = n.replace(/</g, '<');
|
||||
n = n.replace(/>/g, '>');
|
||||
n = n.replace(/"/g, '"');
|
||||
return n;
|
||||
}
|
||||
19
Frontend-Learner/node_modules/diff/libesm/diff/array.d.ts
generated
vendored
Normal file
19
Frontend-Learner/node_modules/diff/libesm/diff/array.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,19 @@
|
|||
import Diff from './base.js';
|
||||
import type { ChangeObject, DiffArraysOptionsNonabortable, CallbackOptionNonabortable, DiffArraysOptionsAbortable, DiffCallbackNonabortable, CallbackOptionAbortable } from '../types.js';
|
||||
declare class ArrayDiff<T> extends Diff<T, Array<T>> {
|
||||
tokenize(value: Array<T>): T[];
|
||||
join(value: Array<T>): T[];
|
||||
removeEmpty(value: Array<T>): T[];
|
||||
}
|
||||
export declare const arrayDiff: ArrayDiff<unknown>;
|
||||
/**
|
||||
* diffs two arrays of tokens, comparing each item for strict equality (===).
|
||||
* @returns a list of change objects.
|
||||
*/
|
||||
export declare function diffArrays<T>(oldArr: T[], newArr: T[], options: DiffCallbackNonabortable<T[]>): undefined;
|
||||
export declare function diffArrays<T>(oldArr: T[], newArr: T[], options: DiffArraysOptionsAbortable<T> & CallbackOptionAbortable<T[]>): undefined;
|
||||
export declare function diffArrays<T>(oldArr: T[], newArr: T[], options: DiffArraysOptionsNonabortable<T> & CallbackOptionNonabortable<T[]>): undefined;
|
||||
export declare function diffArrays<T>(oldArr: T[], newArr: T[], options: DiffArraysOptionsAbortable<T>): ChangeObject<T[]>[] | undefined;
|
||||
export declare function diffArrays<T>(oldArr: T[], newArr: T[], options?: DiffArraysOptionsNonabortable<T>): ChangeObject<T[]>[];
|
||||
export {};
|
||||
//# sourceMappingURL=array.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/diff/array.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/diff/array.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"array.d.ts","sourceRoot":"","sources":["../../src/diff/array.ts"],"names":[],"mappings":"AAAA,OAAO,IAAI,MAAM,WAAW,CAAC;AAC7B,OAAO,KAAK,EAAC,YAAY,EAAE,6BAA6B,EAAE,0BAA0B,EAAE,0BAA0B,EAAE,wBAAwB,EAAE,uBAAuB,EAAC,MAAM,aAAa,CAAC;AAExL,cAAM,SAAS,CAAC,CAAC,CAAE,SAAQ,IAAI,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,CAAC,CAAC;IAC1C,QAAQ,CAAC,KAAK,EAAE,KAAK,CAAC,CAAC,CAAC;IAIxB,IAAI,CAAC,KAAK,EAAE,KAAK,CAAC,CAAC,CAAC;IAIpB,WAAW,CAAC,KAAK,EAAE,KAAK,CAAC,CAAC,CAAC;CAG5B;AAED,eAAO,MAAM,SAAS,oBAAkB,CAAC;AAEzC;;;GAGG;AACH,wBAAgB,UAAU,CAAC,CAAC,EAC1B,MAAM,EAAE,CAAC,EAAE,EACX,MAAM,EAAE,CAAC,EAAE,EACX,OAAO,EAAE,wBAAwB,CAAC,CAAC,EAAE,CAAC,GACrC,SAAS,CAAC;AACb,wBAAgB,UAAU,CAAC,CAAC,EAC1B,MAAM,EAAE,CAAC,EAAE,EACX,MAAM,EAAE,CAAC,EAAE,EACX,OAAO,EAAE,0BAA0B,CAAC,CAAC,CAAC,GAAG,uBAAuB,CAAC,CAAC,EAAE,CAAC,GACpE,SAAS,CAAA;AACZ,wBAAgB,UAAU,CAAC,CAAC,EAC1B,MAAM,EAAE,CAAC,EAAE,EACX,MAAM,EAAE,CAAC,EAAE,EACX,OAAO,EAAE,6BAA6B,CAAC,CAAC,CAAC,GAAG,0BAA0B,CAAC,CAAC,EAAE,CAAC,GAC1E,SAAS,CAAA;AACZ,wBAAgB,UAAU,CAAC,CAAC,EAC1B,MAAM,EAAE,CAAC,EAAE,EACX,MAAM,EAAE,CAAC,EAAE,EACX,OAAO,EAAE,0BAA0B,CAAC,CAAC,CAAC,GACrC,YAAY,CAAC,CAAC,EAAE,CAAC,EAAE,GAAG,SAAS,CAAA;AAClC,wBAAgB,UAAU,CAAC,CAAC,EAC1B,MAAM,EAAE,CAAC,EAAE,EACX,MAAM,EAAE,CAAC,EAAE,EACX,OAAO,CAAC,EAAE,6BAA6B,CAAC,CAAC,CAAC,GACzC,YAAY,CAAC,CAAC,EAAE,CAAC,EAAE,CAAA"}
|
||||
16
Frontend-Learner/node_modules/diff/libesm/diff/array.js
generated
vendored
Normal file
16
Frontend-Learner/node_modules/diff/libesm/diff/array.js
generated
vendored
Normal file
|
|
@ -0,0 +1,16 @@
|
|||
import Diff from './base.js';
|
||||
class ArrayDiff extends Diff {
|
||||
tokenize(value) {
|
||||
return value.slice();
|
||||
}
|
||||
join(value) {
|
||||
return value;
|
||||
}
|
||||
removeEmpty(value) {
|
||||
return value;
|
||||
}
|
||||
}
|
||||
export const arrayDiff = new ArrayDiff();
|
||||
export function diffArrays(oldArr, newArr, options) {
|
||||
return arrayDiff.diff(oldArr, newArr, options);
|
||||
}
|
||||
20
Frontend-Learner/node_modules/diff/libesm/diff/base.d.ts
generated
vendored
Normal file
20
Frontend-Learner/node_modules/diff/libesm/diff/base.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
import type { ChangeObject, AllDiffOptions, AbortableDiffOptions, DiffCallbackNonabortable, CallbackOptionAbortable, CallbackOptionNonabortable } from '../types.js';
|
||||
export default class Diff<TokenT, ValueT extends Iterable<TokenT> = Iterable<TokenT>, InputValueT = ValueT> {
|
||||
diff(oldStr: InputValueT, newStr: InputValueT, options: DiffCallbackNonabortable<ValueT>): undefined;
|
||||
diff(oldStr: InputValueT, newStr: InputValueT, options: AllDiffOptions & AbortableDiffOptions & CallbackOptionAbortable<ValueT>): undefined;
|
||||
diff(oldStr: InputValueT, newStr: InputValueT, options: AllDiffOptions & CallbackOptionNonabortable<ValueT>): undefined;
|
||||
diff(oldStr: InputValueT, newStr: InputValueT, options: AllDiffOptions & AbortableDiffOptions): ChangeObject<ValueT>[] | undefined;
|
||||
diff(oldStr: InputValueT, newStr: InputValueT, options?: AllDiffOptions): ChangeObject<ValueT>[];
|
||||
private diffWithOptionsObj;
|
||||
private addToPath;
|
||||
private extractCommon;
|
||||
equals(left: TokenT, right: TokenT, options: AllDiffOptions): boolean;
|
||||
removeEmpty(array: TokenT[]): TokenT[];
|
||||
castInput(value: InputValueT, options: AllDiffOptions): ValueT;
|
||||
tokenize(value: ValueT, options: AllDiffOptions): TokenT[];
|
||||
join(chars: TokenT[]): ValueT;
|
||||
postProcess(changeObjects: ChangeObject<ValueT>[], options: AllDiffOptions): ChangeObject<ValueT>[];
|
||||
get useLongestToken(): boolean;
|
||||
private buildValues;
|
||||
}
|
||||
//# sourceMappingURL=base.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/diff/base.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/diff/base.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"base.d.ts","sourceRoot":"","sources":["../../src/diff/base.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,YAAY,EAAE,cAAc,EAAE,oBAAoB,EAAE,wBAAwB,EAAE,uBAAuB,EAAE,0BAA0B,EAA4D,MAAM,aAAa,CAAC;AAuB9N,MAAM,CAAC,OAAO,OAAO,IAAI,CACvB,MAAM,EACN,MAAM,SAAS,QAAQ,CAAC,MAAM,CAAC,GAAG,QAAQ,CAAC,MAAM,CAAC,EAClD,WAAW,GAAG,MAAM;IAEpB,IAAI,CACF,MAAM,EAAE,WAAW,EACnB,MAAM,EAAE,WAAW,EACnB,OAAO,EAAE,wBAAwB,CAAC,MAAM,CAAC,GACxC,SAAS;IACZ,IAAI,CACF,MAAM,EAAE,WAAW,EACnB,MAAM,EAAE,WAAW,EACnB,OAAO,EAAE,cAAc,GAAG,oBAAoB,GAAG,uBAAuB,CAAC,MAAM,CAAC,GAC/E,SAAS;IACZ,IAAI,CACF,MAAM,EAAE,WAAW,EACnB,MAAM,EAAE,WAAW,EACnB,OAAO,EAAE,cAAc,GAAG,0BAA0B,CAAC,MAAM,CAAC,GAC3D,SAAS;IACZ,IAAI,CACF,MAAM,EAAE,WAAW,EACnB,MAAM,EAAE,WAAW,EACnB,OAAO,EAAE,cAAc,GAAG,oBAAoB,GAC7C,YAAY,CAAC,MAAM,CAAC,EAAE,GAAG,SAAS;IACrC,IAAI,CACF,MAAM,EAAE,WAAW,EACnB,MAAM,EAAE,WAAW,EACnB,OAAO,CAAC,EAAE,cAAc,GACvB,YAAY,CAAC,MAAM,CAAC,EAAE;IAwBzB,OAAO,CAAC,kBAAkB;IA0I1B,OAAO,CAAC,SAAS;IAqBjB,OAAO,CAAC,aAAa;IA8BrB,MAAM,CAAC,IAAI,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,cAAc,GAAG,OAAO;IASrE,WAAW,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,MAAM,EAAE;IAWtC,SAAS,CAAC,KAAK,EAAE,WAAW,EAAE,OAAO,EAAE,cAAc,GAAG,MAAM;IAK9D,QAAQ,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,cAAc,GAAG,MAAM,EAAE;IAI1D,IAAI,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,MAAM;IAQ7B,WAAW,CACT,aAAa,EAAE,YAAY,CAAC,MAAM,CAAC,EAAE,EAErC,OAAO,EAAE,cAAc,GACtB,YAAY,CAAC,MAAM,CAAC,EAAE;IAIzB,IAAI,eAAe,IAAI,OAAO,CAE7B;IAED,OAAO,CAAC,WAAW;CAkDpB"}
|
||||
253
Frontend-Learner/node_modules/diff/libesm/diff/base.js
generated
vendored
Normal file
253
Frontend-Learner/node_modules/diff/libesm/diff/base.js
generated
vendored
Normal file
|
|
@ -0,0 +1,253 @@
|
|||
export default class Diff {
|
||||
diff(oldStr, newStr,
|
||||
// Type below is not accurate/complete - see above for full possibilities - but it compiles
|
||||
options = {}) {
|
||||
let callback;
|
||||
if (typeof options === 'function') {
|
||||
callback = options;
|
||||
options = {};
|
||||
}
|
||||
else if ('callback' in options) {
|
||||
callback = options.callback;
|
||||
}
|
||||
// Allow subclasses to massage the input prior to running
|
||||
const oldString = this.castInput(oldStr, options);
|
||||
const newString = this.castInput(newStr, options);
|
||||
const oldTokens = this.removeEmpty(this.tokenize(oldString, options));
|
||||
const newTokens = this.removeEmpty(this.tokenize(newString, options));
|
||||
return this.diffWithOptionsObj(oldTokens, newTokens, options, callback);
|
||||
}
|
||||
diffWithOptionsObj(oldTokens, newTokens, options, callback) {
|
||||
var _a;
|
||||
const done = (value) => {
|
||||
value = this.postProcess(value, options);
|
||||
if (callback) {
|
||||
setTimeout(function () { callback(value); }, 0);
|
||||
return undefined;
|
||||
}
|
||||
else {
|
||||
return value;
|
||||
}
|
||||
};
|
||||
const newLen = newTokens.length, oldLen = oldTokens.length;
|
||||
let editLength = 1;
|
||||
let maxEditLength = newLen + oldLen;
|
||||
if (options.maxEditLength != null) {
|
||||
maxEditLength = Math.min(maxEditLength, options.maxEditLength);
|
||||
}
|
||||
const maxExecutionTime = (_a = options.timeout) !== null && _a !== void 0 ? _a : Infinity;
|
||||
const abortAfterTimestamp = Date.now() + maxExecutionTime;
|
||||
const bestPath = [{ oldPos: -1, lastComponent: undefined }];
|
||||
// Seed editLength = 0, i.e. the content starts with the same values
|
||||
let newPos = this.extractCommon(bestPath[0], newTokens, oldTokens, 0, options);
|
||||
if (bestPath[0].oldPos + 1 >= oldLen && newPos + 1 >= newLen) {
|
||||
// Identity per the equality and tokenizer
|
||||
return done(this.buildValues(bestPath[0].lastComponent, newTokens, oldTokens));
|
||||
}
|
||||
// Once we hit the right edge of the edit graph on some diagonal k, we can
|
||||
// definitely reach the end of the edit graph in no more than k edits, so
|
||||
// there's no point in considering any moves to diagonal k+1 any more (from
|
||||
// which we're guaranteed to need at least k+1 more edits).
|
||||
// Similarly, once we've reached the bottom of the edit graph, there's no
|
||||
// point considering moves to lower diagonals.
|
||||
// We record this fact by setting minDiagonalToConsider and
|
||||
// maxDiagonalToConsider to some finite value once we've hit the edge of
|
||||
// the edit graph.
|
||||
// This optimization is not faithful to the original algorithm presented in
|
||||
// Myers's paper, which instead pointlessly extends D-paths off the end of
|
||||
// the edit graph - see page 7 of Myers's paper which notes this point
|
||||
// explicitly and illustrates it with a diagram. This has major performance
|
||||
// implications for some common scenarios. For instance, to compute a diff
|
||||
// where the new text simply appends d characters on the end of the
|
||||
// original text of length n, the true Myers algorithm will take O(n+d^2)
|
||||
// time while this optimization needs only O(n+d) time.
|
||||
let minDiagonalToConsider = -Infinity, maxDiagonalToConsider = Infinity;
|
||||
// Main worker method. checks all permutations of a given edit length for acceptance.
|
||||
const execEditLength = () => {
|
||||
for (let diagonalPath = Math.max(minDiagonalToConsider, -editLength); diagonalPath <= Math.min(maxDiagonalToConsider, editLength); diagonalPath += 2) {
|
||||
let basePath;
|
||||
const removePath = bestPath[diagonalPath - 1], addPath = bestPath[diagonalPath + 1];
|
||||
if (removePath) {
|
||||
// No one else is going to attempt to use this value, clear it
|
||||
// @ts-expect-error - perf optimisation. This type-violating value will never be read.
|
||||
bestPath[diagonalPath - 1] = undefined;
|
||||
}
|
||||
let canAdd = false;
|
||||
if (addPath) {
|
||||
// what newPos will be after we do an insertion:
|
||||
const addPathNewPos = addPath.oldPos - diagonalPath;
|
||||
canAdd = addPath && 0 <= addPathNewPos && addPathNewPos < newLen;
|
||||
}
|
||||
const canRemove = removePath && removePath.oldPos + 1 < oldLen;
|
||||
if (!canAdd && !canRemove) {
|
||||
// If this path is a terminal then prune
|
||||
// @ts-expect-error - perf optimisation. This type-violating value will never be read.
|
||||
bestPath[diagonalPath] = undefined;
|
||||
continue;
|
||||
}
|
||||
// Select the diagonal that we want to branch from. We select the prior
|
||||
// path whose position in the old string is the farthest from the origin
|
||||
// and does not pass the bounds of the diff graph
|
||||
if (!canRemove || (canAdd && removePath.oldPos < addPath.oldPos)) {
|
||||
basePath = this.addToPath(addPath, true, false, 0, options);
|
||||
}
|
||||
else {
|
||||
basePath = this.addToPath(removePath, false, true, 1, options);
|
||||
}
|
||||
newPos = this.extractCommon(basePath, newTokens, oldTokens, diagonalPath, options);
|
||||
if (basePath.oldPos + 1 >= oldLen && newPos + 1 >= newLen) {
|
||||
// If we have hit the end of both strings, then we are done
|
||||
return done(this.buildValues(basePath.lastComponent, newTokens, oldTokens)) || true;
|
||||
}
|
||||
else {
|
||||
bestPath[diagonalPath] = basePath;
|
||||
if (basePath.oldPos + 1 >= oldLen) {
|
||||
maxDiagonalToConsider = Math.min(maxDiagonalToConsider, diagonalPath - 1);
|
||||
}
|
||||
if (newPos + 1 >= newLen) {
|
||||
minDiagonalToConsider = Math.max(minDiagonalToConsider, diagonalPath + 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
editLength++;
|
||||
};
|
||||
// Performs the length of edit iteration. Is a bit fugly as this has to support the
|
||||
// sync and async mode which is never fun. Loops over execEditLength until a value
|
||||
// is produced, or until the edit length exceeds options.maxEditLength (if given),
|
||||
// in which case it will return undefined.
|
||||
if (callback) {
|
||||
(function exec() {
|
||||
setTimeout(function () {
|
||||
if (editLength > maxEditLength || Date.now() > abortAfterTimestamp) {
|
||||
return callback(undefined);
|
||||
}
|
||||
if (!execEditLength()) {
|
||||
exec();
|
||||
}
|
||||
}, 0);
|
||||
}());
|
||||
}
|
||||
else {
|
||||
while (editLength <= maxEditLength && Date.now() <= abortAfterTimestamp) {
|
||||
const ret = execEditLength();
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
addToPath(path, added, removed, oldPosInc, options) {
|
||||
const last = path.lastComponent;
|
||||
if (last && !options.oneChangePerToken && last.added === added && last.removed === removed) {
|
||||
return {
|
||||
oldPos: path.oldPos + oldPosInc,
|
||||
lastComponent: { count: last.count + 1, added: added, removed: removed, previousComponent: last.previousComponent }
|
||||
};
|
||||
}
|
||||
else {
|
||||
return {
|
||||
oldPos: path.oldPos + oldPosInc,
|
||||
lastComponent: { count: 1, added: added, removed: removed, previousComponent: last }
|
||||
};
|
||||
}
|
||||
}
|
||||
extractCommon(basePath, newTokens, oldTokens, diagonalPath, options) {
|
||||
const newLen = newTokens.length, oldLen = oldTokens.length;
|
||||
let oldPos = basePath.oldPos, newPos = oldPos - diagonalPath, commonCount = 0;
|
||||
while (newPos + 1 < newLen && oldPos + 1 < oldLen && this.equals(oldTokens[oldPos + 1], newTokens[newPos + 1], options)) {
|
||||
newPos++;
|
||||
oldPos++;
|
||||
commonCount++;
|
||||
if (options.oneChangePerToken) {
|
||||
basePath.lastComponent = { count: 1, previousComponent: basePath.lastComponent, added: false, removed: false };
|
||||
}
|
||||
}
|
||||
if (commonCount && !options.oneChangePerToken) {
|
||||
basePath.lastComponent = { count: commonCount, previousComponent: basePath.lastComponent, added: false, removed: false };
|
||||
}
|
||||
basePath.oldPos = oldPos;
|
||||
return newPos;
|
||||
}
|
||||
equals(left, right, options) {
|
||||
if (options.comparator) {
|
||||
return options.comparator(left, right);
|
||||
}
|
||||
else {
|
||||
return left === right
|
||||
|| (!!options.ignoreCase && left.toLowerCase() === right.toLowerCase());
|
||||
}
|
||||
}
|
||||
removeEmpty(array) {
|
||||
const ret = [];
|
||||
for (let i = 0; i < array.length; i++) {
|
||||
if (array[i]) {
|
||||
ret.push(array[i]);
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
// eslint-disable-next-line @typescript-eslint/no-unused-vars
|
||||
castInput(value, options) {
|
||||
return value;
|
||||
}
|
||||
// eslint-disable-next-line @typescript-eslint/no-unused-vars
|
||||
tokenize(value, options) {
|
||||
return Array.from(value);
|
||||
}
|
||||
join(chars) {
|
||||
// Assumes ValueT is string, which is the case for most subclasses.
|
||||
// When it's false, e.g. in diffArrays, this method needs to be overridden (e.g. with a no-op)
|
||||
// Yes, the casts are verbose and ugly, because this pattern - of having the base class SORT OF
|
||||
// assume tokens and values are strings, but not completely - is weird and janky.
|
||||
return chars.join('');
|
||||
}
|
||||
postProcess(changeObjects,
|
||||
// eslint-disable-next-line @typescript-eslint/no-unused-vars
|
||||
options) {
|
||||
return changeObjects;
|
||||
}
|
||||
get useLongestToken() {
|
||||
return false;
|
||||
}
|
||||
buildValues(lastComponent, newTokens, oldTokens) {
|
||||
// First we convert our linked list of components in reverse order to an
|
||||
// array in the right order:
|
||||
const components = [];
|
||||
let nextComponent;
|
||||
while (lastComponent) {
|
||||
components.push(lastComponent);
|
||||
nextComponent = lastComponent.previousComponent;
|
||||
delete lastComponent.previousComponent;
|
||||
lastComponent = nextComponent;
|
||||
}
|
||||
components.reverse();
|
||||
const componentLen = components.length;
|
||||
let componentPos = 0, newPos = 0, oldPos = 0;
|
||||
for (; componentPos < componentLen; componentPos++) {
|
||||
const component = components[componentPos];
|
||||
if (!component.removed) {
|
||||
if (!component.added && this.useLongestToken) {
|
||||
let value = newTokens.slice(newPos, newPos + component.count);
|
||||
value = value.map(function (value, i) {
|
||||
const oldValue = oldTokens[oldPos + i];
|
||||
return oldValue.length > value.length ? oldValue : value;
|
||||
});
|
||||
component.value = this.join(value);
|
||||
}
|
||||
else {
|
||||
component.value = this.join(newTokens.slice(newPos, newPos + component.count));
|
||||
}
|
||||
newPos += component.count;
|
||||
// Common case
|
||||
if (!component.added) {
|
||||
oldPos += component.count;
|
||||
}
|
||||
}
|
||||
else {
|
||||
component.value = this.join(oldTokens.slice(oldPos, oldPos + component.count));
|
||||
oldPos += component.count;
|
||||
}
|
||||
}
|
||||
return components;
|
||||
}
|
||||
}
|
||||
19
Frontend-Learner/node_modules/diff/libesm/diff/character.d.ts
generated
vendored
Normal file
19
Frontend-Learner/node_modules/diff/libesm/diff/character.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,19 @@
|
|||
import Diff from './base.js';
|
||||
import type { ChangeObject, CallbackOptionAbortable, CallbackOptionNonabortable, DiffCallbackNonabortable, DiffCharsOptionsAbortable, DiffCharsOptionsNonabortable } from '../types.js';
|
||||
declare class CharacterDiff extends Diff<string, string> {
|
||||
}
|
||||
export declare const characterDiff: CharacterDiff;
|
||||
/**
|
||||
* diffs two blocks of text, treating each character as a token.
|
||||
*
|
||||
* ("Characters" here means Unicode code points - the elements you get when you loop over a string with a `for ... of ...` loop.)
|
||||
*
|
||||
* @returns a list of change objects.
|
||||
*/
|
||||
export declare function diffChars(oldStr: string, newStr: string, options: DiffCallbackNonabortable<string>): undefined;
|
||||
export declare function diffChars(oldStr: string, newStr: string, options: DiffCharsOptionsAbortable & CallbackOptionAbortable<string>): undefined;
|
||||
export declare function diffChars(oldStr: string, newStr: string, options: DiffCharsOptionsNonabortable & CallbackOptionNonabortable<string>): undefined;
|
||||
export declare function diffChars(oldStr: string, newStr: string, options: DiffCharsOptionsAbortable): ChangeObject<string>[] | undefined;
|
||||
export declare function diffChars(oldStr: string, newStr: string, options?: DiffCharsOptionsNonabortable): ChangeObject<string>[];
|
||||
export {};
|
||||
//# sourceMappingURL=character.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/diff/character.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/diff/character.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"character.d.ts","sourceRoot":"","sources":["../../src/diff/character.ts"],"names":[],"mappings":"AAAA,OAAO,IAAI,MAAM,WAAW,CAAC;AAC7B,OAAO,KAAK,EAAE,YAAY,EAAE,uBAAuB,EAAE,0BAA0B,EAAE,wBAAwB,EAAE,yBAAyB,EAAE,4BAA4B,EAAC,MAAM,aAAa,CAAC;AAEvL,cAAM,aAAc,SAAQ,IAAI,CAAC,MAAM,EAAE,MAAM,CAAC;CAAG;AAEnD,eAAO,MAAM,aAAa,eAAsB,CAAC;AAEjD;;;;;;GAMG;AACH,wBAAgB,SAAS,CACvB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,wBAAwB,CAAC,MAAM,CAAC,GACxC,SAAS,CAAC;AACb,wBAAgB,SAAS,CACvB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,yBAAyB,GAAG,uBAAuB,CAAC,MAAM,CAAC,GACnE,SAAS,CAAA;AACZ,wBAAgB,SAAS,CACvB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,4BAA4B,GAAG,0BAA0B,CAAC,MAAM,CAAC,GACzE,SAAS,CAAA;AACZ,wBAAgB,SAAS,CACvB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,yBAAyB,GACjC,YAAY,CAAC,MAAM,CAAC,EAAE,GAAG,SAAS,CAAA;AACrC,wBAAgB,SAAS,CACvB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,CAAC,EAAE,4BAA4B,GACrC,YAAY,CAAC,MAAM,CAAC,EAAE,CAAA"}
|
||||
7
Frontend-Learner/node_modules/diff/libesm/diff/character.js
generated
vendored
Normal file
7
Frontend-Learner/node_modules/diff/libesm/diff/character.js
generated
vendored
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
import Diff from './base.js';
|
||||
class CharacterDiff extends Diff {
|
||||
}
|
||||
export const characterDiff = new CharacterDiff();
|
||||
export function diffChars(oldStr, newStr, options) {
|
||||
return characterDiff.diff(oldStr, newStr, options);
|
||||
}
|
||||
18
Frontend-Learner/node_modules/diff/libesm/diff/css.d.ts
generated
vendored
Normal file
18
Frontend-Learner/node_modules/diff/libesm/diff/css.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
import Diff from './base.js';
|
||||
import type { ChangeObject, CallbackOptionAbortable, CallbackOptionNonabortable, DiffCallbackNonabortable, DiffCssOptionsAbortable, DiffCssOptionsNonabortable } from '../types.js';
|
||||
declare class CssDiff extends Diff<string, string> {
|
||||
tokenize(value: string): string[];
|
||||
}
|
||||
export declare const cssDiff: CssDiff;
|
||||
/**
|
||||
* diffs two blocks of text, comparing CSS tokens.
|
||||
*
|
||||
* @returns a list of change objects.
|
||||
*/
|
||||
export declare function diffCss(oldStr: string, newStr: string, options: DiffCallbackNonabortable<string>): undefined;
|
||||
export declare function diffCss(oldStr: string, newStr: string, options: DiffCssOptionsAbortable & CallbackOptionAbortable<string>): undefined;
|
||||
export declare function diffCss(oldStr: string, newStr: string, options: DiffCssOptionsNonabortable & CallbackOptionNonabortable<string>): undefined;
|
||||
export declare function diffCss(oldStr: string, newStr: string, options: DiffCssOptionsAbortable): ChangeObject<string>[] | undefined;
|
||||
export declare function diffCss(oldStr: string, newStr: string, options?: DiffCssOptionsNonabortable): ChangeObject<string>[];
|
||||
export {};
|
||||
//# sourceMappingURL=css.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/diff/css.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/diff/css.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"css.d.ts","sourceRoot":"","sources":["../../src/diff/css.ts"],"names":[],"mappings":"AAAA,OAAO,IAAI,MAAM,WAAW,CAAC;AAC7B,OAAO,KAAK,EAAE,YAAY,EAAE,uBAAuB,EAAE,0BAA0B,EAAE,wBAAwB,EAAE,uBAAuB,EAAE,0BAA0B,EAAC,MAAM,aAAa,CAAC;AAEnL,cAAM,OAAQ,SAAQ,IAAI,CAAC,MAAM,EAAE,MAAM,CAAC;IACxC,QAAQ,CAAC,KAAK,EAAE,MAAM;CAGvB;AAED,eAAO,MAAM,OAAO,SAAgB,CAAC;AAErC;;;;GAIG;AACH,wBAAgB,OAAO,CACrB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,wBAAwB,CAAC,MAAM,CAAC,GACxC,SAAS,CAAC;AACb,wBAAgB,OAAO,CACrB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,uBAAuB,GAAG,uBAAuB,CAAC,MAAM,CAAC,GACjE,SAAS,CAAA;AACZ,wBAAgB,OAAO,CACrB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,0BAA0B,GAAG,0BAA0B,CAAC,MAAM,CAAC,GACvE,SAAS,CAAA;AACZ,wBAAgB,OAAO,CACrB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,uBAAuB,GAC/B,YAAY,CAAC,MAAM,CAAC,EAAE,GAAG,SAAS,CAAA;AACrC,wBAAgB,OAAO,CACrB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,CAAC,EAAE,0BAA0B,GACnC,YAAY,CAAC,MAAM,CAAC,EAAE,CAAA"}
|
||||
10
Frontend-Learner/node_modules/diff/libesm/diff/css.js
generated
vendored
Normal file
10
Frontend-Learner/node_modules/diff/libesm/diff/css.js
generated
vendored
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
import Diff from './base.js';
|
||||
class CssDiff extends Diff {
|
||||
tokenize(value) {
|
||||
return value.split(/([{}:;,]|\s+)/);
|
||||
}
|
||||
}
|
||||
export const cssDiff = new CssDiff();
|
||||
export function diffCss(oldStr, newStr, options) {
|
||||
return cssDiff.diff(oldStr, newStr, options);
|
||||
}
|
||||
24
Frontend-Learner/node_modules/diff/libesm/diff/json.d.ts
generated
vendored
Normal file
24
Frontend-Learner/node_modules/diff/libesm/diff/json.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
import Diff from './base.js';
|
||||
import type { ChangeObject, CallbackOptionAbortable, CallbackOptionNonabortable, DiffCallbackNonabortable, DiffJsonOptionsAbortable, DiffJsonOptionsNonabortable } from '../types.js';
|
||||
import { tokenize } from './line.js';
|
||||
declare class JsonDiff extends Diff<string, string, string | object> {
|
||||
get useLongestToken(): boolean;
|
||||
tokenize: typeof tokenize;
|
||||
castInput(value: string | object, options: DiffJsonOptionsNonabortable | DiffJsonOptionsAbortable): string;
|
||||
equals(left: string, right: string, options: DiffJsonOptionsNonabortable | DiffJsonOptionsAbortable): boolean;
|
||||
}
|
||||
export declare const jsonDiff: JsonDiff;
|
||||
/**
|
||||
* diffs two JSON-serializable objects by first serializing them to prettily-formatted JSON and then treating each line of the JSON as a token.
|
||||
* Object properties are ordered alphabetically in the serialized JSON, so the order of properties in the objects being compared doesn't affect the result.
|
||||
*
|
||||
* @returns a list of change objects.
|
||||
*/
|
||||
export declare function diffJson(oldStr: string | object, newStr: string | object, options: DiffCallbackNonabortable<string>): undefined;
|
||||
export declare function diffJson(oldStr: string | object, newStr: string | object, options: DiffJsonOptionsAbortable & CallbackOptionAbortable<string>): undefined;
|
||||
export declare function diffJson(oldStr: string | object, newStr: string | object, options: DiffJsonOptionsNonabortable & CallbackOptionNonabortable<string>): undefined;
|
||||
export declare function diffJson(oldStr: string | object, newStr: string | object, options: DiffJsonOptionsAbortable): ChangeObject<string>[] | undefined;
|
||||
export declare function diffJson(oldStr: string | object, newStr: string | object, options?: DiffJsonOptionsNonabortable): ChangeObject<string>[];
|
||||
export declare function canonicalize(obj: any, stack: Array<any> | null, replacementStack: Array<any> | null, replacer: (k: string, v: any) => any, key?: string): any;
|
||||
export {};
|
||||
//# sourceMappingURL=json.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/diff/json.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/diff/json.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"json.d.ts","sourceRoot":"","sources":["../../src/diff/json.ts"],"names":[],"mappings":"AAAA,OAAO,IAAI,MAAM,WAAW,CAAC;AAC7B,OAAO,KAAK,EAAE,YAAY,EAAE,uBAAuB,EAAE,0BAA0B,EAAE,wBAAwB,EAAE,wBAAwB,EAAE,2BAA2B,EAAC,MAAM,aAAa,CAAC;AACrL,OAAO,EAAE,QAAQ,EAAE,MAAM,WAAW,CAAC;AAErC,cAAM,QAAS,SAAQ,IAAI,CAAC,MAAM,EAAE,MAAM,EAAE,MAAM,GAAG,MAAM,CAAC;IAC1D,IAAI,eAAe,YAIlB;IAED,QAAQ,kBAAY;IAEpB,SAAS,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM,EAAE,OAAO,EAAE,2BAA2B,GAAG,wBAAwB;IAMjG,MAAM,CAAC,IAAI,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,2BAA2B,GAAG,wBAAwB;CAGpG;AAED,eAAO,MAAM,QAAQ,UAAiB,CAAC;AAEvC;;;;;GAKG;AACH,wBAAgB,QAAQ,CACtB,MAAM,EAAE,MAAM,GAAG,MAAM,EACvB,MAAM,EAAE,MAAM,GAAG,MAAM,EACvB,OAAO,EAAE,wBAAwB,CAAC,MAAM,CAAC,GACxC,SAAS,CAAC;AACb,wBAAgB,QAAQ,CACtB,MAAM,EAAE,MAAM,GAAG,MAAM,EACvB,MAAM,EAAE,MAAM,GAAG,MAAM,EACvB,OAAO,EAAE,wBAAwB,GAAG,uBAAuB,CAAC,MAAM,CAAC,GAClE,SAAS,CAAA;AACZ,wBAAgB,QAAQ,CACtB,MAAM,EAAE,MAAM,GAAG,MAAM,EACvB,MAAM,EAAE,MAAM,GAAG,MAAM,EACvB,OAAO,EAAE,2BAA2B,GAAG,0BAA0B,CAAC,MAAM,CAAC,GACxE,SAAS,CAAA;AACZ,wBAAgB,QAAQ,CACtB,MAAM,EAAE,MAAM,GAAG,MAAM,EACvB,MAAM,EAAE,MAAM,GAAG,MAAM,EACvB,OAAO,EAAE,wBAAwB,GAChC,YAAY,CAAC,MAAM,CAAC,EAAE,GAAG,SAAS,CAAA;AACrC,wBAAgB,QAAQ,CACtB,MAAM,EAAE,MAAM,GAAG,MAAM,EACvB,MAAM,EAAE,MAAM,GAAG,MAAM,EACvB,OAAO,CAAC,EAAE,2BAA2B,GACpC,YAAY,CAAC,MAAM,CAAC,EAAE,CAAA;AAQzB,wBAAgB,YAAY,CAC1B,GAAG,EAAE,GAAG,EACR,KAAK,EAAE,KAAK,CAAC,GAAG,CAAC,GAAG,IAAI,EAAE,gBAAgB,EAAE,KAAK,CAAC,GAAG,CAAC,GAAG,IAAI,EAC7D,QAAQ,EAAE,CAAC,CAAC,EAAE,MAAM,EAAE,CAAC,EAAE,GAAG,KAAK,GAAG,EACpC,GAAG,CAAC,EAAE,MAAM,OA0Db"}
|
||||
78
Frontend-Learner/node_modules/diff/libesm/diff/json.js
generated
vendored
Normal file
78
Frontend-Learner/node_modules/diff/libesm/diff/json.js
generated
vendored
Normal file
|
|
@ -0,0 +1,78 @@
|
|||
import Diff from './base.js';
|
||||
import { tokenize } from './line.js';
|
||||
class JsonDiff extends Diff {
|
||||
constructor() {
|
||||
super(...arguments);
|
||||
this.tokenize = tokenize;
|
||||
}
|
||||
get useLongestToken() {
|
||||
// Discriminate between two lines of pretty-printed, serialized JSON where one of them has a
|
||||
// dangling comma and the other doesn't. Turns out including the dangling comma yields the nicest output:
|
||||
return true;
|
||||
}
|
||||
castInput(value, options) {
|
||||
const { undefinedReplacement, stringifyReplacer = (k, v) => typeof v === 'undefined' ? undefinedReplacement : v } = options;
|
||||
return typeof value === 'string' ? value : JSON.stringify(canonicalize(value, null, null, stringifyReplacer), null, ' ');
|
||||
}
|
||||
equals(left, right, options) {
|
||||
return super.equals(left.replace(/,([\r\n])/g, '$1'), right.replace(/,([\r\n])/g, '$1'), options);
|
||||
}
|
||||
}
|
||||
export const jsonDiff = new JsonDiff();
|
||||
export function diffJson(oldStr, newStr, options) {
|
||||
return jsonDiff.diff(oldStr, newStr, options);
|
||||
}
|
||||
// This function handles the presence of circular references by bailing out when encountering an
|
||||
// object that is already on the "stack" of items being processed. Accepts an optional replacer
|
||||
export function canonicalize(obj, stack, replacementStack, replacer, key) {
|
||||
stack = stack || [];
|
||||
replacementStack = replacementStack || [];
|
||||
if (replacer) {
|
||||
obj = replacer(key === undefined ? '' : key, obj);
|
||||
}
|
||||
let i;
|
||||
for (i = 0; i < stack.length; i += 1) {
|
||||
if (stack[i] === obj) {
|
||||
return replacementStack[i];
|
||||
}
|
||||
}
|
||||
let canonicalizedObj;
|
||||
if ('[object Array]' === Object.prototype.toString.call(obj)) {
|
||||
stack.push(obj);
|
||||
canonicalizedObj = new Array(obj.length);
|
||||
replacementStack.push(canonicalizedObj);
|
||||
for (i = 0; i < obj.length; i += 1) {
|
||||
canonicalizedObj[i] = canonicalize(obj[i], stack, replacementStack, replacer, String(i));
|
||||
}
|
||||
stack.pop();
|
||||
replacementStack.pop();
|
||||
return canonicalizedObj;
|
||||
}
|
||||
if (obj && obj.toJSON) {
|
||||
obj = obj.toJSON();
|
||||
}
|
||||
if (typeof obj === 'object' && obj !== null) {
|
||||
stack.push(obj);
|
||||
canonicalizedObj = {};
|
||||
replacementStack.push(canonicalizedObj);
|
||||
const sortedKeys = [];
|
||||
let key;
|
||||
for (key in obj) {
|
||||
/* istanbul ignore else */
|
||||
if (Object.prototype.hasOwnProperty.call(obj, key)) {
|
||||
sortedKeys.push(key);
|
||||
}
|
||||
}
|
||||
sortedKeys.sort();
|
||||
for (i = 0; i < sortedKeys.length; i += 1) {
|
||||
key = sortedKeys[i];
|
||||
canonicalizedObj[key] = canonicalize(obj[key], stack, replacementStack, replacer, key);
|
||||
}
|
||||
stack.pop();
|
||||
replacementStack.pop();
|
||||
}
|
||||
else {
|
||||
canonicalizedObj = obj;
|
||||
}
|
||||
return canonicalizedObj;
|
||||
}
|
||||
24
Frontend-Learner/node_modules/diff/libesm/diff/line.d.ts
generated
vendored
Normal file
24
Frontend-Learner/node_modules/diff/libesm/diff/line.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
import Diff from './base.js';
|
||||
import type { ChangeObject, CallbackOptionAbortable, CallbackOptionNonabortable, DiffCallbackNonabortable, DiffLinesOptionsAbortable, DiffLinesOptionsNonabortable } from '../types.js';
|
||||
declare class LineDiff extends Diff<string, string> {
|
||||
tokenize: typeof tokenize;
|
||||
equals(left: string, right: string, options: DiffLinesOptionsAbortable | DiffLinesOptionsNonabortable): boolean;
|
||||
}
|
||||
export declare const lineDiff: LineDiff;
|
||||
/**
|
||||
* diffs two blocks of text, treating each line as a token.
|
||||
* @returns a list of change objects.
|
||||
*/
|
||||
export declare function diffLines(oldStr: string, newStr: string, options: DiffCallbackNonabortable<string>): undefined;
|
||||
export declare function diffLines(oldStr: string, newStr: string, options: DiffLinesOptionsAbortable & CallbackOptionAbortable<string>): undefined;
|
||||
export declare function diffLines(oldStr: string, newStr: string, options: DiffLinesOptionsNonabortable & CallbackOptionNonabortable<string>): undefined;
|
||||
export declare function diffLines(oldStr: string, newStr: string, options: DiffLinesOptionsAbortable): ChangeObject<string>[] | undefined;
|
||||
export declare function diffLines(oldStr: string, newStr: string, options?: DiffLinesOptionsNonabortable): ChangeObject<string>[];
|
||||
export declare function diffTrimmedLines(oldStr: string, newStr: string, options: DiffCallbackNonabortable<string>): undefined;
|
||||
export declare function diffTrimmedLines(oldStr: string, newStr: string, options: DiffLinesOptionsAbortable & CallbackOptionAbortable<string>): undefined;
|
||||
export declare function diffTrimmedLines(oldStr: string, newStr: string, options: DiffLinesOptionsNonabortable & CallbackOptionNonabortable<string>): undefined;
|
||||
export declare function diffTrimmedLines(oldStr: string, newStr: string, options: DiffLinesOptionsAbortable): ChangeObject<string>[] | undefined;
|
||||
export declare function diffTrimmedLines(oldStr: string, newStr: string, options?: DiffLinesOptionsNonabortable): ChangeObject<string>[];
|
||||
export declare function tokenize(value: string, options: DiffLinesOptionsAbortable | DiffLinesOptionsNonabortable): string[];
|
||||
export {};
|
||||
//# sourceMappingURL=line.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/diff/line.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/diff/line.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"line.d.ts","sourceRoot":"","sources":["../../src/diff/line.ts"],"names":[],"mappings":"AAAA,OAAO,IAAI,MAAM,WAAW,CAAC;AAC7B,OAAO,KAAK,EAAE,YAAY,EAAE,uBAAuB,EAAE,0BAA0B,EAAE,wBAAwB,EAAE,yBAAyB,EAAE,4BAA4B,EAAC,MAAM,aAAa,CAAC;AAGvL,cAAM,QAAS,SAAQ,IAAI,CAAC,MAAM,EAAE,MAAM,CAAC;IACzC,QAAQ,kBAAY;IAEpB,MAAM,CAAC,IAAI,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,yBAAyB,GAAG,4BAA4B;CAyBtG;AAED,eAAO,MAAM,QAAQ,UAAiB,CAAC;AAEvC;;;GAGG;AACH,wBAAgB,SAAS,CACvB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,wBAAwB,CAAC,MAAM,CAAC,GACxC,SAAS,CAAC;AACb,wBAAgB,SAAS,CACvB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,yBAAyB,GAAG,uBAAuB,CAAC,MAAM,CAAC,GACnE,SAAS,CAAA;AACZ,wBAAgB,SAAS,CACvB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,4BAA4B,GAAG,0BAA0B,CAAC,MAAM,CAAC,GACzE,SAAS,CAAA;AACZ,wBAAgB,SAAS,CACvB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,yBAAyB,GACjC,YAAY,CAAC,MAAM,CAAC,EAAE,GAAG,SAAS,CAAA;AACrC,wBAAgB,SAAS,CACvB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,CAAC,EAAE,4BAA4B,GACrC,YAAY,CAAC,MAAM,CAAC,EAAE,CAAA;AAWzB,wBAAgB,gBAAgB,CAC9B,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,wBAAwB,CAAC,MAAM,CAAC,GACxC,SAAS,CAAC;AACb,wBAAgB,gBAAgB,CAC9B,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,yBAAyB,GAAG,uBAAuB,CAAC,MAAM,CAAC,GACnE,SAAS,CAAA;AACZ,wBAAgB,gBAAgB,CAC9B,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,4BAA4B,GAAG,0BAA0B,CAAC,MAAM,CAAC,GACzE,SAAS,CAAA;AACZ,wBAAgB,gBAAgB,CAC9B,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,yBAAyB,GACjC,YAAY,CAAC,MAAM,CAAC,EAAE,GAAG,SAAS,CAAA;AACrC,wBAAgB,gBAAgB,CAC9B,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,CAAC,EAAE,4BAA4B,GACrC,YAAY,CAAC,MAAM,CAAC,EAAE,CAAA;AAOzB,wBAAgB,QAAQ,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,yBAAyB,GAAG,4BAA4B,YA0BxG"}
|
||||
65
Frontend-Learner/node_modules/diff/libesm/diff/line.js
generated
vendored
Normal file
65
Frontend-Learner/node_modules/diff/libesm/diff/line.js
generated
vendored
Normal file
|
|
@ -0,0 +1,65 @@
|
|||
import Diff from './base.js';
|
||||
import { generateOptions } from '../util/params.js';
|
||||
class LineDiff extends Diff {
|
||||
constructor() {
|
||||
super(...arguments);
|
||||
this.tokenize = tokenize;
|
||||
}
|
||||
equals(left, right, options) {
|
||||
// If we're ignoring whitespace, we need to normalise lines by stripping
|
||||
// whitespace before checking equality. (This has an annoying interaction
|
||||
// with newlineIsToken that requires special handling: if newlines get their
|
||||
// own token, then we DON'T want to trim the *newline* tokens down to empty
|
||||
// strings, since this would cause us to treat whitespace-only line content
|
||||
// as equal to a separator between lines, which would be weird and
|
||||
// inconsistent with the documented behavior of the options.)
|
||||
if (options.ignoreWhitespace) {
|
||||
if (!options.newlineIsToken || !left.includes('\n')) {
|
||||
left = left.trim();
|
||||
}
|
||||
if (!options.newlineIsToken || !right.includes('\n')) {
|
||||
right = right.trim();
|
||||
}
|
||||
}
|
||||
else if (options.ignoreNewlineAtEof && !options.newlineIsToken) {
|
||||
if (left.endsWith('\n')) {
|
||||
left = left.slice(0, -1);
|
||||
}
|
||||
if (right.endsWith('\n')) {
|
||||
right = right.slice(0, -1);
|
||||
}
|
||||
}
|
||||
return super.equals(left, right, options);
|
||||
}
|
||||
}
|
||||
export const lineDiff = new LineDiff();
|
||||
export function diffLines(oldStr, newStr, options) {
|
||||
return lineDiff.diff(oldStr, newStr, options);
|
||||
}
|
||||
export function diffTrimmedLines(oldStr, newStr, options) {
|
||||
options = generateOptions(options, { ignoreWhitespace: true });
|
||||
return lineDiff.diff(oldStr, newStr, options);
|
||||
}
|
||||
// Exported standalone so it can be used from jsonDiff too.
|
||||
export function tokenize(value, options) {
|
||||
if (options.stripTrailingCr) {
|
||||
// remove one \r before \n to match GNU diff's --strip-trailing-cr behavior
|
||||
value = value.replace(/\r\n/g, '\n');
|
||||
}
|
||||
const retLines = [], linesAndNewlines = value.split(/(\n|\r\n)/);
|
||||
// Ignore the final empty token that occurs if the string ends with a new line
|
||||
if (!linesAndNewlines[linesAndNewlines.length - 1]) {
|
||||
linesAndNewlines.pop();
|
||||
}
|
||||
// Merge the content and line separators into single tokens
|
||||
for (let i = 0; i < linesAndNewlines.length; i++) {
|
||||
const line = linesAndNewlines[i];
|
||||
if (i % 2 && !options.newlineIsToken) {
|
||||
retLines[retLines.length - 1] += line;
|
||||
}
|
||||
else {
|
||||
retLines.push(line);
|
||||
}
|
||||
}
|
||||
return retLines;
|
||||
}
|
||||
21
Frontend-Learner/node_modules/diff/libesm/diff/sentence.d.ts
generated
vendored
Normal file
21
Frontend-Learner/node_modules/diff/libesm/diff/sentence.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
import Diff from './base.js';
|
||||
import type { ChangeObject, CallbackOptionAbortable, CallbackOptionNonabortable, DiffCallbackNonabortable, DiffSentencesOptionsAbortable, DiffSentencesOptionsNonabortable } from '../types.js';
|
||||
declare class SentenceDiff extends Diff<string, string> {
|
||||
tokenize(value: string): string[];
|
||||
}
|
||||
export declare const sentenceDiff: SentenceDiff;
|
||||
/**
|
||||
* diffs two blocks of text, treating each sentence, and the whitespace between each pair of sentences, as a token.
|
||||
* The characters `.`, `!`, and `?`, when followed by whitespace, are treated as marking the end of a sentence; nothing else besides the end of the string is considered to mark a sentence end.
|
||||
*
|
||||
* (For more sophisticated detection of sentence breaks, including support for non-English punctuation, consider instead tokenizing with an [`Intl.Segmenter`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Intl/Segmenter) with `granularity: 'sentence'` and passing the result to `diffArrays`.)
|
||||
*
|
||||
* @returns a list of change objects.
|
||||
*/
|
||||
export declare function diffSentences(oldStr: string, newStr: string, options: DiffCallbackNonabortable<string>): undefined;
|
||||
export declare function diffSentences(oldStr: string, newStr: string, options: DiffSentencesOptionsAbortable & CallbackOptionAbortable<string>): undefined;
|
||||
export declare function diffSentences(oldStr: string, newStr: string, options: DiffSentencesOptionsNonabortable & CallbackOptionNonabortable<string>): undefined;
|
||||
export declare function diffSentences(oldStr: string, newStr: string, options: DiffSentencesOptionsAbortable): ChangeObject<string>[] | undefined;
|
||||
export declare function diffSentences(oldStr: string, newStr: string, options?: DiffSentencesOptionsNonabortable): ChangeObject<string>[];
|
||||
export {};
|
||||
//# sourceMappingURL=sentence.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/diff/sentence.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/diff/sentence.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"sentence.d.ts","sourceRoot":"","sources":["../../src/diff/sentence.ts"],"names":[],"mappings":"AAAA,OAAO,IAAI,MAAM,WAAW,CAAC;AAC7B,OAAO,KAAK,EACV,YAAY,EACZ,uBAAuB,EACvB,0BAA0B,EAC1B,wBAAwB,EACxB,6BAA6B,EAC7B,gCAAgC,EACjC,MAAM,aAAa,CAAC;AAMrB,cAAM,YAAa,SAAQ,IAAI,CAAC,MAAM,EAAE,MAAM,CAAC;IAC7C,QAAQ,CAAC,KAAK,EAAE,MAAM;CAoCvB;AAED,eAAO,MAAM,YAAY,cAAqB,CAAC;AAE/C;;;;;;;GAOG;AACH,wBAAgB,aAAa,CAC3B,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,wBAAwB,CAAC,MAAM,CAAC,GACxC,SAAS,CAAC;AACb,wBAAgB,aAAa,CAC3B,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,6BAA6B,GAAG,uBAAuB,CAAC,MAAM,CAAC,GACvE,SAAS,CAAA;AACZ,wBAAgB,aAAa,CAC3B,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,gCAAgC,GAAG,0BAA0B,CAAC,MAAM,CAAC,GAC7E,SAAS,CAAA;AACZ,wBAAgB,aAAa,CAC3B,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,6BAA6B,GACrC,YAAY,CAAC,MAAM,CAAC,EAAE,GAAG,SAAS,CAAA;AACrC,wBAAgB,aAAa,CAC3B,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,CAAC,EAAE,gCAAgC,GACzC,YAAY,CAAC,MAAM,CAAC,EAAE,CAAA"}
|
||||
43
Frontend-Learner/node_modules/diff/libesm/diff/sentence.js
generated
vendored
Normal file
43
Frontend-Learner/node_modules/diff/libesm/diff/sentence.js
generated
vendored
Normal file
|
|
@ -0,0 +1,43 @@
|
|||
import Diff from './base.js';
|
||||
function isSentenceEndPunct(char) {
|
||||
return char == '.' || char == '!' || char == '?';
|
||||
}
|
||||
class SentenceDiff extends Diff {
|
||||
tokenize(value) {
|
||||
var _a;
|
||||
// If in future we drop support for environments that don't support lookbehinds, we can replace
|
||||
// this entire function with:
|
||||
// return value.split(/(?<=[.!?])(\s+|$)/);
|
||||
// but until then, for similar reasons to the trailingWs function in string.ts, we are forced
|
||||
// to do this verbosely "by hand" instead of using a regex.
|
||||
const result = [];
|
||||
let tokenStartI = 0;
|
||||
for (let i = 0; i < value.length; i++) {
|
||||
if (i == value.length - 1) {
|
||||
result.push(value.slice(tokenStartI));
|
||||
break;
|
||||
}
|
||||
if (isSentenceEndPunct(value[i]) && value[i + 1].match(/\s/)) {
|
||||
// We've hit a sentence break - i.e. a punctuation mark followed by whitespace.
|
||||
// We now want to push TWO tokens to the result:
|
||||
// 1. the sentence
|
||||
result.push(value.slice(tokenStartI, i + 1));
|
||||
// 2. the whitespace
|
||||
i = tokenStartI = i + 1;
|
||||
while ((_a = value[i + 1]) === null || _a === void 0 ? void 0 : _a.match(/\s/)) {
|
||||
i++;
|
||||
}
|
||||
result.push(value.slice(tokenStartI, i + 1));
|
||||
// Then the next token (a sentence) starts on the character after the whitespace.
|
||||
// (It's okay if this is off the end of the string - then the outer loop will terminate
|
||||
// here anyway.)
|
||||
tokenStartI = i + 1;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
}
|
||||
export const sentenceDiff = new SentenceDiff();
|
||||
export function diffSentences(oldStr, newStr, options) {
|
||||
return sentenceDiff.diff(oldStr, newStr, options);
|
||||
}
|
||||
35
Frontend-Learner/node_modules/diff/libesm/diff/word.d.ts
generated
vendored
Normal file
35
Frontend-Learner/node_modules/diff/libesm/diff/word.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,35 @@
|
|||
import Diff from './base.js';
|
||||
import type { ChangeObject, CallbackOptionAbortable, CallbackOptionNonabortable, DiffCallbackNonabortable, DiffWordsOptionsAbortable, DiffWordsOptionsNonabortable } from '../types.js';
|
||||
declare class WordDiff extends Diff<string, string> {
|
||||
equals(left: string, right: string, options: DiffWordsOptionsAbortable | DiffWordsOptionsNonabortable): boolean;
|
||||
tokenize(value: string, options?: DiffWordsOptionsAbortable | DiffWordsOptionsNonabortable): string[];
|
||||
join(tokens: string[]): string;
|
||||
postProcess(changes: ChangeObject<string>[], options: any): ChangeObject<string>[];
|
||||
}
|
||||
export declare const wordDiff: WordDiff;
|
||||
/**
|
||||
* diffs two blocks of text, treating each word and each punctuation mark as a token.
|
||||
* Whitespace is ignored when computing the diff (but preserved as far as possible in the final change objects).
|
||||
*
|
||||
* @returns a list of change objects.
|
||||
*/
|
||||
export declare function diffWords(oldStr: string, newStr: string, options: DiffCallbackNonabortable<string>): undefined;
|
||||
export declare function diffWords(oldStr: string, newStr: string, options: DiffWordsOptionsAbortable & CallbackOptionAbortable<string>): undefined;
|
||||
export declare function diffWords(oldStr: string, newStr: string, options: DiffWordsOptionsNonabortable & CallbackOptionNonabortable<string>): undefined;
|
||||
export declare function diffWords(oldStr: string, newStr: string, options: DiffWordsOptionsAbortable): ChangeObject<string>[] | undefined;
|
||||
export declare function diffWords(oldStr: string, newStr: string, options?: DiffWordsOptionsNonabortable): ChangeObject<string>[];
|
||||
declare class WordsWithSpaceDiff extends Diff<string, string> {
|
||||
tokenize(value: string): [] | RegExpMatchArray;
|
||||
}
|
||||
export declare const wordsWithSpaceDiff: WordsWithSpaceDiff;
|
||||
/**
|
||||
* diffs two blocks of text, treating each word, punctuation mark, newline, or run of (non-newline) whitespace as a token.
|
||||
* @returns a list of change objects
|
||||
*/
|
||||
export declare function diffWordsWithSpace(oldStr: string, newStr: string, options: DiffCallbackNonabortable<string>): undefined;
|
||||
export declare function diffWordsWithSpace(oldStr: string, newStr: string, options: DiffWordsOptionsAbortable & CallbackOptionAbortable<string>): undefined;
|
||||
export declare function diffWordsWithSpace(oldStr: string, newStr: string, options: DiffWordsOptionsNonabortable & CallbackOptionNonabortable<string>): undefined;
|
||||
export declare function diffWordsWithSpace(oldStr: string, newStr: string, options: DiffWordsOptionsAbortable): ChangeObject<string>[] | undefined;
|
||||
export declare function diffWordsWithSpace(oldStr: string, newStr: string, options?: DiffWordsOptionsNonabortable): ChangeObject<string>[];
|
||||
export {};
|
||||
//# sourceMappingURL=word.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/diff/word.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/diff/word.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"word.d.ts","sourceRoot":"","sources":["../../src/diff/word.ts"],"names":[],"mappings":"AAAA,OAAO,IAAI,MAAM,WAAW,CAAC;AAC7B,OAAO,KAAK,EAAE,YAAY,EAAE,uBAAuB,EAAE,0BAA0B,EAAE,wBAAwB,EAAE,yBAAyB,EAAE,4BAA4B,EAAC,MAAM,aAAa,CAAC;AAmDvL,cAAM,QAAS,SAAQ,IAAI,CAAC,MAAM,EAAE,MAAM,CAAC;IACzC,MAAM,CAAC,IAAI,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,yBAAyB,GAAG,4BAA4B;IASrG,QAAQ,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,GAAE,yBAAyB,GAAG,4BAAiC;IAmC9F,IAAI,CAAC,MAAM,EAAE,MAAM,EAAE;IAerB,WAAW,CAAC,OAAO,EAAE,YAAY,CAAC,MAAM,CAAC,EAAE,EAAE,OAAO,EAAE,GAAG;CA6B1D;AAED,eAAO,MAAM,QAAQ,UAAiB,CAAC;AAEvC;;;;;GAKG;AACH,wBAAgB,SAAS,CACvB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,wBAAwB,CAAC,MAAM,CAAC,GACxC,SAAS,CAAC;AACb,wBAAgB,SAAS,CACvB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,yBAAyB,GAAG,uBAAuB,CAAC,MAAM,CAAC,GACnE,SAAS,CAAA;AACZ,wBAAgB,SAAS,CACvB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,4BAA4B,GAAG,0BAA0B,CAAC,MAAM,CAAC,GACzE,SAAS,CAAA;AACZ,wBAAgB,SAAS,CACvB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,yBAAyB,GACjC,YAAY,CAAC,MAAM,CAAC,EAAE,GAAG,SAAS,CAAA;AACrC,wBAAgB,SAAS,CACvB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,CAAC,EAAE,4BAA4B,GACrC,YAAY,CAAC,MAAM,CAAC,EAAE,CAAA;AA8IzB,cAAM,kBAAmB,SAAQ,IAAI,CAAC,MAAM,EAAE,MAAM,CAAC;IACnD,QAAQ,CAAC,KAAK,EAAE,MAAM;CASvB;AAED,eAAO,MAAM,kBAAkB,oBAA2B,CAAC;AAE3D;;;GAGG;AACH,wBAAgB,kBAAkB,CAChC,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,wBAAwB,CAAC,MAAM,CAAC,GACxC,SAAS,CAAC;AACb,wBAAgB,kBAAkB,CAChC,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,yBAAyB,GAAG,uBAAuB,CAAC,MAAM,CAAC,GACnE,SAAS,CAAA;AACZ,wBAAgB,kBAAkB,CAChC,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,4BAA4B,GAAG,0BAA0B,CAAC,MAAM,CAAC,GACzE,SAAS,CAAA;AACZ,wBAAgB,kBAAkB,CAChC,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,EAAE,yBAAyB,GACjC,YAAY,CAAC,MAAM,CAAC,EAAE,GAAG,SAAS,CAAA;AACrC,wBAAgB,kBAAkB,CAChC,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,OAAO,CAAC,EAAE,4BAA4B,GACrC,YAAY,CAAC,MAAM,CAAC,EAAE,CAAA"}
|
||||
276
Frontend-Learner/node_modules/diff/libesm/diff/word.js
generated
vendored
Normal file
276
Frontend-Learner/node_modules/diff/libesm/diff/word.js
generated
vendored
Normal file
|
|
@ -0,0 +1,276 @@
|
|||
import Diff from './base.js';
|
||||
import { longestCommonPrefix, longestCommonSuffix, replacePrefix, replaceSuffix, removePrefix, removeSuffix, maximumOverlap, leadingWs, trailingWs } from '../util/string.js';
|
||||
// Based on https://en.wikipedia.org/wiki/Latin_script_in_Unicode
|
||||
//
|
||||
// Ranges and exceptions:
|
||||
// Latin-1 Supplement, 0080–00FF
|
||||
// - U+00D7 × Multiplication sign
|
||||
// - U+00F7 ÷ Division sign
|
||||
// Latin Extended-A, 0100–017F
|
||||
// Latin Extended-B, 0180–024F
|
||||
// IPA Extensions, 0250–02AF
|
||||
// Spacing Modifier Letters, 02B0–02FF
|
||||
// - U+02C7 ˇ ˇ Caron
|
||||
// - U+02D8 ˘ ˘ Breve
|
||||
// - U+02D9 ˙ ˙ Dot Above
|
||||
// - U+02DA ˚ ˚ Ring Above
|
||||
// - U+02DB ˛ ˛ Ogonek
|
||||
// - U+02DC ˜ ˜ Small Tilde
|
||||
// - U+02DD ˝ ˝ Double Acute Accent
|
||||
// Latin Extended Additional, 1E00–1EFF
|
||||
const extendedWordChars = 'a-zA-Z0-9_\\u{C0}-\\u{FF}\\u{D8}-\\u{F6}\\u{F8}-\\u{2C6}\\u{2C8}-\\u{2D7}\\u{2DE}-\\u{2FF}\\u{1E00}-\\u{1EFF}';
|
||||
// Each token is one of the following:
|
||||
// - A punctuation mark plus the surrounding whitespace
|
||||
// - A word plus the surrounding whitespace
|
||||
// - Pure whitespace (but only in the special case where this the entire text
|
||||
// is just whitespace)
|
||||
//
|
||||
// We have to include surrounding whitespace in the tokens because the two
|
||||
// alternative approaches produce horribly broken results:
|
||||
// * If we just discard the whitespace, we can't fully reproduce the original
|
||||
// text from the sequence of tokens and any attempt to render the diff will
|
||||
// get the whitespace wrong.
|
||||
// * If we have separate tokens for whitespace, then in a typical text every
|
||||
// second token will be a single space character. But this often results in
|
||||
// the optimal diff between two texts being a perverse one that preserves
|
||||
// the spaces between words but deletes and reinserts actual common words.
|
||||
// See https://github.com/kpdecker/jsdiff/issues/160#issuecomment-1866099640
|
||||
// for an example.
|
||||
//
|
||||
// Keeping the surrounding whitespace of course has implications for .equals
|
||||
// and .join, not just .tokenize.
|
||||
// This regex does NOT fully implement the tokenization rules described above.
|
||||
// Instead, it gives runs of whitespace their own "token". The tokenize method
|
||||
// then handles stitching whitespace tokens onto adjacent word or punctuation
|
||||
// tokens.
|
||||
const tokenizeIncludingWhitespace = new RegExp(`[${extendedWordChars}]+|\\s+|[^${extendedWordChars}]`, 'ug');
|
||||
class WordDiff extends Diff {
|
||||
equals(left, right, options) {
|
||||
if (options.ignoreCase) {
|
||||
left = left.toLowerCase();
|
||||
right = right.toLowerCase();
|
||||
}
|
||||
return left.trim() === right.trim();
|
||||
}
|
||||
tokenize(value, options = {}) {
|
||||
let parts;
|
||||
if (options.intlSegmenter) {
|
||||
const segmenter = options.intlSegmenter;
|
||||
if (segmenter.resolvedOptions().granularity != 'word') {
|
||||
throw new Error('The segmenter passed must have a granularity of "word"');
|
||||
}
|
||||
parts = Array.from(segmenter.segment(value), segment => segment.segment);
|
||||
}
|
||||
else {
|
||||
parts = value.match(tokenizeIncludingWhitespace) || [];
|
||||
}
|
||||
const tokens = [];
|
||||
let prevPart = null;
|
||||
parts.forEach(part => {
|
||||
if ((/\s/).test(part)) {
|
||||
if (prevPart == null) {
|
||||
tokens.push(part);
|
||||
}
|
||||
else {
|
||||
tokens.push(tokens.pop() + part);
|
||||
}
|
||||
}
|
||||
else if (prevPart != null && (/\s/).test(prevPart)) {
|
||||
if (tokens[tokens.length - 1] == prevPart) {
|
||||
tokens.push(tokens.pop() + part);
|
||||
}
|
||||
else {
|
||||
tokens.push(prevPart + part);
|
||||
}
|
||||
}
|
||||
else {
|
||||
tokens.push(part);
|
||||
}
|
||||
prevPart = part;
|
||||
});
|
||||
return tokens;
|
||||
}
|
||||
join(tokens) {
|
||||
// Tokens being joined here will always have appeared consecutively in the
|
||||
// same text, so we can simply strip off the leading whitespace from all the
|
||||
// tokens except the first (and except any whitespace-only tokens - but such
|
||||
// a token will always be the first and only token anyway) and then join them
|
||||
// and the whitespace around words and punctuation will end up correct.
|
||||
return tokens.map((token, i) => {
|
||||
if (i == 0) {
|
||||
return token;
|
||||
}
|
||||
else {
|
||||
return token.replace((/^\s+/), '');
|
||||
}
|
||||
}).join('');
|
||||
}
|
||||
postProcess(changes, options) {
|
||||
if (!changes || options.oneChangePerToken) {
|
||||
return changes;
|
||||
}
|
||||
let lastKeep = null;
|
||||
// Change objects representing any insertion or deletion since the last
|
||||
// "keep" change object. There can be at most one of each.
|
||||
let insertion = null;
|
||||
let deletion = null;
|
||||
changes.forEach(change => {
|
||||
if (change.added) {
|
||||
insertion = change;
|
||||
}
|
||||
else if (change.removed) {
|
||||
deletion = change;
|
||||
}
|
||||
else {
|
||||
if (insertion || deletion) { // May be false at start of text
|
||||
dedupeWhitespaceInChangeObjects(lastKeep, deletion, insertion, change);
|
||||
}
|
||||
lastKeep = change;
|
||||
insertion = null;
|
||||
deletion = null;
|
||||
}
|
||||
});
|
||||
if (insertion || deletion) {
|
||||
dedupeWhitespaceInChangeObjects(lastKeep, deletion, insertion, null);
|
||||
}
|
||||
return changes;
|
||||
}
|
||||
}
|
||||
export const wordDiff = new WordDiff();
|
||||
export function diffWords(oldStr, newStr, options) {
|
||||
// This option has never been documented and never will be (it's clearer to
|
||||
// just call `diffWordsWithSpace` directly if you need that behavior), but
|
||||
// has existed in jsdiff for a long time, so we retain support for it here
|
||||
// for the sake of backwards compatibility.
|
||||
if ((options === null || options === void 0 ? void 0 : options.ignoreWhitespace) != null && !options.ignoreWhitespace) {
|
||||
return diffWordsWithSpace(oldStr, newStr, options);
|
||||
}
|
||||
return wordDiff.diff(oldStr, newStr, options);
|
||||
}
|
||||
function dedupeWhitespaceInChangeObjects(startKeep, deletion, insertion, endKeep) {
|
||||
// Before returning, we tidy up the leading and trailing whitespace of the
|
||||
// change objects to eliminate cases where trailing whitespace in one object
|
||||
// is repeated as leading whitespace in the next.
|
||||
// Below are examples of the outcomes we want here to explain the code.
|
||||
// I=insert, K=keep, D=delete
|
||||
// 1. diffing 'foo bar baz' vs 'foo baz'
|
||||
// Prior to cleanup, we have K:'foo ' D:' bar ' K:' baz'
|
||||
// After cleanup, we want: K:'foo ' D:'bar ' K:'baz'
|
||||
//
|
||||
// 2. Diffing 'foo bar baz' vs 'foo qux baz'
|
||||
// Prior to cleanup, we have K:'foo ' D:' bar ' I:' qux ' K:' baz'
|
||||
// After cleanup, we want K:'foo ' D:'bar' I:'qux' K:' baz'
|
||||
//
|
||||
// 3. Diffing 'foo\nbar baz' vs 'foo baz'
|
||||
// Prior to cleanup, we have K:'foo ' D:'\nbar ' K:' baz'
|
||||
// After cleanup, we want K'foo' D:'\nbar' K:' baz'
|
||||
//
|
||||
// 4. Diffing 'foo baz' vs 'foo\nbar baz'
|
||||
// Prior to cleanup, we have K:'foo\n' I:'\nbar ' K:' baz'
|
||||
// After cleanup, we ideally want K'foo' I:'\nbar' K:' baz'
|
||||
// but don't actually manage this currently (the pre-cleanup change
|
||||
// objects don't contain enough information to make it possible).
|
||||
//
|
||||
// 5. Diffing 'foo bar baz' vs 'foo baz'
|
||||
// Prior to cleanup, we have K:'foo ' D:' bar ' K:' baz'
|
||||
// After cleanup, we want K:'foo ' D:' bar ' K:'baz'
|
||||
//
|
||||
// Our handling is unavoidably imperfect in the case where there's a single
|
||||
// indel between keeps and the whitespace has changed. For instance, consider
|
||||
// diffing 'foo\tbar\nbaz' vs 'foo baz'. Unless we create an extra change
|
||||
// object to represent the insertion of the space character (which isn't even
|
||||
// a token), we have no way to avoid losing information about the texts'
|
||||
// original whitespace in the result we return. Still, we do our best to
|
||||
// output something that will look sensible if we e.g. print it with
|
||||
// insertions in green and deletions in red.
|
||||
// Between two "keep" change objects (or before the first or after the last
|
||||
// change object), we can have either:
|
||||
// * A "delete" followed by an "insert"
|
||||
// * Just an "insert"
|
||||
// * Just a "delete"
|
||||
// We handle the three cases separately.
|
||||
if (deletion && insertion) {
|
||||
const oldWsPrefix = leadingWs(deletion.value);
|
||||
const oldWsSuffix = trailingWs(deletion.value);
|
||||
const newWsPrefix = leadingWs(insertion.value);
|
||||
const newWsSuffix = trailingWs(insertion.value);
|
||||
if (startKeep) {
|
||||
const commonWsPrefix = longestCommonPrefix(oldWsPrefix, newWsPrefix);
|
||||
startKeep.value = replaceSuffix(startKeep.value, newWsPrefix, commonWsPrefix);
|
||||
deletion.value = removePrefix(deletion.value, commonWsPrefix);
|
||||
insertion.value = removePrefix(insertion.value, commonWsPrefix);
|
||||
}
|
||||
if (endKeep) {
|
||||
const commonWsSuffix = longestCommonSuffix(oldWsSuffix, newWsSuffix);
|
||||
endKeep.value = replacePrefix(endKeep.value, newWsSuffix, commonWsSuffix);
|
||||
deletion.value = removeSuffix(deletion.value, commonWsSuffix);
|
||||
insertion.value = removeSuffix(insertion.value, commonWsSuffix);
|
||||
}
|
||||
}
|
||||
else if (insertion) {
|
||||
// The whitespaces all reflect what was in the new text rather than
|
||||
// the old, so we essentially have no information about whitespace
|
||||
// insertion or deletion. We just want to dedupe the whitespace.
|
||||
// We do that by having each change object keep its trailing
|
||||
// whitespace and deleting duplicate leading whitespace where
|
||||
// present.
|
||||
if (startKeep) {
|
||||
const ws = leadingWs(insertion.value);
|
||||
insertion.value = insertion.value.substring(ws.length);
|
||||
}
|
||||
if (endKeep) {
|
||||
const ws = leadingWs(endKeep.value);
|
||||
endKeep.value = endKeep.value.substring(ws.length);
|
||||
}
|
||||
// otherwise we've got a deletion and no insertion
|
||||
}
|
||||
else if (startKeep && endKeep) {
|
||||
const newWsFull = leadingWs(endKeep.value), delWsStart = leadingWs(deletion.value), delWsEnd = trailingWs(deletion.value);
|
||||
// Any whitespace that comes straight after startKeep in both the old and
|
||||
// new texts, assign to startKeep and remove from the deletion.
|
||||
const newWsStart = longestCommonPrefix(newWsFull, delWsStart);
|
||||
deletion.value = removePrefix(deletion.value, newWsStart);
|
||||
// Any whitespace that comes straight before endKeep in both the old and
|
||||
// new texts, and hasn't already been assigned to startKeep, assign to
|
||||
// endKeep and remove from the deletion.
|
||||
const newWsEnd = longestCommonSuffix(removePrefix(newWsFull, newWsStart), delWsEnd);
|
||||
deletion.value = removeSuffix(deletion.value, newWsEnd);
|
||||
endKeep.value = replacePrefix(endKeep.value, newWsFull, newWsEnd);
|
||||
// If there's any whitespace from the new text that HASN'T already been
|
||||
// assigned, assign it to the start:
|
||||
startKeep.value = replaceSuffix(startKeep.value, newWsFull, newWsFull.slice(0, newWsFull.length - newWsEnd.length));
|
||||
}
|
||||
else if (endKeep) {
|
||||
// We are at the start of the text. Preserve all the whitespace on
|
||||
// endKeep, and just remove whitespace from the end of deletion to the
|
||||
// extent that it overlaps with the start of endKeep.
|
||||
const endKeepWsPrefix = leadingWs(endKeep.value);
|
||||
const deletionWsSuffix = trailingWs(deletion.value);
|
||||
const overlap = maximumOverlap(deletionWsSuffix, endKeepWsPrefix);
|
||||
deletion.value = removeSuffix(deletion.value, overlap);
|
||||
}
|
||||
else if (startKeep) {
|
||||
// We are at the END of the text. Preserve all the whitespace on
|
||||
// startKeep, and just remove whitespace from the start of deletion to
|
||||
// the extent that it overlaps with the end of startKeep.
|
||||
const startKeepWsSuffix = trailingWs(startKeep.value);
|
||||
const deletionWsPrefix = leadingWs(deletion.value);
|
||||
const overlap = maximumOverlap(startKeepWsSuffix, deletionWsPrefix);
|
||||
deletion.value = removePrefix(deletion.value, overlap);
|
||||
}
|
||||
}
|
||||
class WordsWithSpaceDiff extends Diff {
    /**
     * Splits `value` into tokens: each `\r?\n` newline is its own token, as are
     * runs of word characters, runs of non-newline whitespace, and single
     * other (non-word) characters.
     *
     * Slightly different to the tokenizeIncludingWhitespace regex used above in
     * that this one treats each individual newline as a distinct token, rather
     * than merging them into other surrounding whitespace. This was requested
     * in https://github.com/kpdecker/jsdiff/issues/180 &
     * https://github.com/kpdecker/jsdiff/issues/211
     */
    tokenize(value) {
        const tokenizer = new RegExp(`(\\r?\\n)|[${extendedWordChars}]+|[^\\S\\n\\r]+|[^${extendedWordChars}]`, 'ug');
        const tokens = value.match(tokenizer);
        return tokens === null ? [] : tokens;
    }
}
|
||||
// Shared module-level instance used by diffWordsWithSpace.
export const wordsWithSpaceDiff = new WordsWithSpaceDiff();

/**
 * Diffs `oldStr` against `newStr` using the WordsWithSpaceDiff tokenizer
 * (words, punctuation, whitespace runs, and individual newlines as tokens).
 */
export function diffWordsWithSpace(oldStr, newStr, options) {
    const result = wordsWithSpaceDiff.diff(oldStr, newStr, options);
    return result;
}
|
||||
20
Frontend-Learner/node_modules/diff/libesm/index.d.ts
generated
vendored
Normal file
20
Frontend-Learner/node_modules/diff/libesm/index.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
import Diff from './diff/base.js';
|
||||
import { diffChars, characterDiff } from './diff/character.js';
|
||||
import { diffWords, diffWordsWithSpace, wordDiff, wordsWithSpaceDiff } from './diff/word.js';
|
||||
import { diffLines, diffTrimmedLines, lineDiff } from './diff/line.js';
|
||||
import { diffSentences, sentenceDiff } from './diff/sentence.js';
|
||||
import { diffCss, cssDiff } from './diff/css.js';
|
||||
import { diffJson, canonicalize, jsonDiff } from './diff/json.js';
|
||||
import { diffArrays, arrayDiff } from './diff/array.js';
|
||||
import { applyPatch, applyPatches } from './patch/apply.js';
|
||||
import type { ApplyPatchOptions, ApplyPatchesOptions } from './patch/apply.js';
|
||||
import { parsePatch } from './patch/parse.js';
|
||||
import { reversePatch } from './patch/reverse.js';
|
||||
import { structuredPatch, createTwoFilesPatch, createPatch, formatPatch } from './patch/create.js';
|
||||
import type { StructuredPatchOptionsAbortable, StructuredPatchOptionsNonabortable, CreatePatchOptionsAbortable, CreatePatchOptionsNonabortable } from './patch/create.js';
|
||||
import { convertChangesToDMP } from './convert/dmp.js';
|
||||
import { convertChangesToXML } from './convert/xml.js';
|
||||
import type { ChangeObject, Change, DiffArraysOptionsAbortable, DiffArraysOptionsNonabortable, DiffCharsOptionsAbortable, DiffCharsOptionsNonabortable, DiffLinesOptionsAbortable, DiffLinesOptionsNonabortable, DiffWordsOptionsAbortable, DiffWordsOptionsNonabortable, DiffSentencesOptionsAbortable, DiffSentencesOptionsNonabortable, DiffJsonOptionsAbortable, DiffJsonOptionsNonabortable, DiffCssOptionsAbortable, DiffCssOptionsNonabortable, StructuredPatch, StructuredPatchHunk } from './types.js';
|
||||
export { Diff, diffChars, characterDiff, diffWords, wordDiff, diffWordsWithSpace, wordsWithSpaceDiff, diffLines, lineDiff, diffTrimmedLines, diffSentences, sentenceDiff, diffCss, cssDiff, diffJson, jsonDiff, diffArrays, arrayDiff, structuredPatch, createTwoFilesPatch, createPatch, formatPatch, applyPatch, applyPatches, parsePatch, reversePatch, convertChangesToDMP, convertChangesToXML, canonicalize };
|
||||
export type { ChangeObject, Change, DiffArraysOptionsAbortable, DiffArraysOptionsNonabortable, DiffCharsOptionsAbortable, DiffCharsOptionsNonabortable, DiffLinesOptionsAbortable, DiffLinesOptionsNonabortable, DiffWordsOptionsAbortable, DiffWordsOptionsNonabortable, DiffSentencesOptionsAbortable, DiffSentencesOptionsNonabortable, DiffJsonOptionsAbortable, DiffJsonOptionsNonabortable, DiffCssOptionsAbortable, DiffCssOptionsNonabortable, StructuredPatch, StructuredPatchHunk, ApplyPatchOptions, ApplyPatchesOptions, StructuredPatchOptionsAbortable, StructuredPatchOptionsNonabortable, CreatePatchOptionsAbortable, CreatePatchOptionsNonabortable };
|
||||
//# sourceMappingURL=index.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/index.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/index.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAgBA,OAAO,IAAI,MAAM,gBAAgB,CAAC;AAClC,OAAO,EAAC,SAAS,EAAE,aAAa,EAAC,MAAM,qBAAqB,CAAC;AAC7D,OAAO,EAAC,SAAS,EAAE,kBAAkB,EAAE,QAAQ,EAAE,kBAAkB,EAAC,MAAM,gBAAgB,CAAC;AAC3F,OAAO,EAAC,SAAS,EAAE,gBAAgB,EAAE,QAAQ,EAAC,MAAM,gBAAgB,CAAC;AACrE,OAAO,EAAC,aAAa,EAAE,YAAY,EAAC,MAAM,oBAAoB,CAAC;AAE/D,OAAO,EAAC,OAAO,EAAE,OAAO,EAAC,MAAM,eAAe,CAAC;AAC/C,OAAO,EAAC,QAAQ,EAAE,YAAY,EAAE,QAAQ,EAAC,MAAM,gBAAgB,CAAC;AAEhE,OAAO,EAAC,UAAU,EAAE,SAAS,EAAC,MAAM,iBAAiB,CAAC;AAEtD,OAAO,EAAC,UAAU,EAAE,YAAY,EAAC,MAAM,kBAAkB,CAAC;AAC1D,OAAO,KAAK,EAAC,iBAAiB,EAAE,mBAAmB,EAAC,MAAM,kBAAkB,CAAC;AAC7E,OAAO,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAC5C,OAAO,EAAC,YAAY,EAAC,MAAM,oBAAoB,CAAC;AAChD,OAAO,EACL,eAAe,EACf,mBAAmB,EACnB,WAAW,EACX,WAAW,EACZ,MAAM,mBAAmB,CAAC;AAC3B,OAAO,KAAK,EACV,+BAA+B,EAC/B,kCAAkC,EAClC,2BAA2B,EAC3B,8BAA8B,EAC/B,MAAM,mBAAmB,CAAC;AAE3B,OAAO,EAAC,mBAAmB,EAAC,MAAM,kBAAkB,CAAC;AACrD,OAAO,EAAC,mBAAmB,EAAC,MAAM,kBAAkB,CAAC;AACrD,OAAO,KAAK,EACV,YAAY,EACZ,MAAM,EACN,0BAA0B,EAC1B,6BAA6B,EAC7B,yBAAyB,EACzB,4BAA4B,EAC5B,yBAAyB,EACzB,4BAA4B,EAC5B,yBAAyB,EACzB,4BAA4B,EAC5B,6BAA6B,EAC7B,gCAAgC,EAChC,wBAAwB,EACxB,2BAA2B,EAC3B,uBAAuB,EACvB,0BAA0B,EAC1B,eAAe,EACf,mBAAmB,EACpB,MAAM,YAAY,CAAC;AAEpB,OAAO,EACL,IAAI,EAEJ,SAAS,EACT,aAAa,EACb,SAAS,EACT,QAAQ,EACR,kBAAkB,EAClB,kBAAkB,EAClB,SAAS,EACT,QAAQ,EACR,gBAAgB,EAChB,aAAa,EACb,YAAY,EACZ,OAAO,EACP,OAAO,EACP,QAAQ,EACR,QAAQ,EACR,UAAU,EACV,SAAS,EAET,eAAe,EACf,mBAAmB,EACnB,WAAW,EACX,WAAW,EACX,UAAU,EACV,YAAY,EACZ,UAAU,EACV,YAAY,EACZ,mBAAmB,EACnB,mBAAmB,EACnB,YAAY,EACb,CAAC;AAEF,YAAY,EACV,YAAY,EACZ,MAAM,EACN,0BAA0B,EAC1B,6BAA6B,EAC7B,yBAAyB,EACzB,4BAA4B,EAC5B,yBAAyB,EACzB,4BAA4B,EAC5B,yBAAyB,EACzB,4BAA4B,EAC5B,6BAA6B,EAC7B,gCAAgC,EAChC,wBAAwB,EACxB,2BAA2B,EAC3B,uBAAuB,EACvB,0BAA0B,EAC1B,eAAe,EACf,mBAAmB,EAEnB,iBAAiB,EACjB,mBAAmB,EAEnB,+BAA+B,EAC/B,kCAAkC,EAClC,2BAA2B,EAC3B,8BAA8B,EAC/B,CAAC"}
|
||||
30
Frontend-Learner/node_modules/diff/libesm/index.js
generated
vendored
Normal file
30
Frontend-Learner/node_modules/diff/libesm/index.js
generated
vendored
Normal file
|
|
@ -0,0 +1,30 @@
|
|||
/* See LICENSE file for terms of use */
|
||||
/*
|
||||
* Text diff implementation.
|
||||
*
|
||||
* This library supports the following APIs:
|
||||
* Diff.diffChars: Character by character diff
|
||||
* Diff.diffWords: Word (as defined by \b regex) diff which ignores whitespace
|
||||
* Diff.diffLines: Line based diff
|
||||
*
|
||||
* Diff.diffCss: Diff targeted at CSS content
|
||||
*
|
||||
* These methods are based on the implementation proposed in
|
||||
* "An O(ND) Difference Algorithm and its Variations" (Myers, 1986).
|
||||
* http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.4.6927
|
||||
*/
|
||||
import Diff from './diff/base.js';
|
||||
import { diffChars, characterDiff } from './diff/character.js';
|
||||
import { diffWords, diffWordsWithSpace, wordDiff, wordsWithSpaceDiff } from './diff/word.js';
|
||||
import { diffLines, diffTrimmedLines, lineDiff } from './diff/line.js';
|
||||
import { diffSentences, sentenceDiff } from './diff/sentence.js';
|
||||
import { diffCss, cssDiff } from './diff/css.js';
|
||||
import { diffJson, canonicalize, jsonDiff } from './diff/json.js';
|
||||
import { diffArrays, arrayDiff } from './diff/array.js';
|
||||
import { applyPatch, applyPatches } from './patch/apply.js';
|
||||
import { parsePatch } from './patch/parse.js';
|
||||
import { reversePatch } from './patch/reverse.js';
|
||||
import { structuredPatch, createTwoFilesPatch, createPatch, formatPatch } from './patch/create.js';
|
||||
import { convertChangesToDMP } from './convert/dmp.js';
|
||||
import { convertChangesToXML } from './convert/xml.js';
|
||||
export { Diff, diffChars, characterDiff, diffWords, wordDiff, diffWordsWithSpace, wordsWithSpaceDiff, diffLines, lineDiff, diffTrimmedLines, diffSentences, sentenceDiff, diffCss, cssDiff, diffJson, jsonDiff, diffArrays, arrayDiff, structuredPatch, createTwoFilesPatch, createPatch, formatPatch, applyPatch, applyPatches, parsePatch, reversePatch, convertChangesToDMP, convertChangesToXML, canonicalize };
|
||||
1
Frontend-Learner/node_modules/diff/libesm/package.json
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/package.json
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"type":"module","sideEffects":false}
|
||||
62
Frontend-Learner/node_modules/diff/libesm/patch/apply.d.ts
generated
vendored
Normal file
62
Frontend-Learner/node_modules/diff/libesm/patch/apply.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,62 @@
|
|||
import type { StructuredPatch } from '../types.js';
|
||||
/**
 * Options controlling how {@link applyPatch} matches and applies hunks.
 */
export interface ApplyPatchOptions {
    /**
     * Maximum Levenshtein distance (in lines deleted, added, or substituted) between the context shown in a patch hunk and the lines found in the file.
     * @default 0
     */
    fuzzFactor?: number;
    /**
     * If `true`, and if the file to be patched consistently uses different line endings to the patch (i.e. either the file always uses Unix line endings while the patch uses Windows ones, or vice versa), then `applyPatch` will behave as if the line endings in the patch were the same as those in the source file.
     * (If `false`, the patch will usually fail to apply in such circumstances since lines deleted in the patch won't be considered to match those in the source file.)
     * @default true
     */
    autoConvertLineEndings?: boolean;
    /**
     * Callback used to compare two given lines to determine if they should be considered equal when patching.
     * Defaults to strict equality but may be overridden to provide fuzzier comparison.
     * Should return false if the lines should be rejected.
     */
    compareLine?: (lineNumber: number, line: string, operation: string, patchContent: string) => boolean;
}
|
||||
/**
 * attempts to apply a unified diff patch.
 *
 * Hunks are applied first to last.
 * `applyPatch` first tries to apply the first hunk at the line number specified in the hunk header, and with all context lines matching exactly.
 * If that fails, it tries scanning backwards and forwards, one line at a time, to find a place to apply the hunk where the context lines match exactly.
 * If that still fails, and `fuzzFactor` is greater than zero, it increments the maximum number of mismatches (missing, extra, or changed context lines) that there can be between the hunk context and a region where we are trying to apply the patch such that the hunk will still be considered to match.
 * Regardless of `fuzzFactor`, lines to be deleted in the hunk *must* be present for a hunk to match, and the context lines *immediately* before and after an insertion must match exactly.
 *
 * Once a hunk is successfully fitted, the process begins again with the next hunk.
 * Regardless of `fuzzFactor`, later hunks must be applied later in the file than earlier hunks.
 *
 * If a hunk cannot be successfully fitted *anywhere* with fewer than `fuzzFactor` mismatches, `applyPatch` fails and returns `false`.
 *
 * If a hunk is successfully fitted but not at the line number specified by the hunk header, all subsequent hunks have their target line number adjusted accordingly.
 * (e.g. if the first hunk is applied 10 lines below where the hunk header said it should fit, `applyPatch` will *start* looking for somewhere to apply the second hunk 10 lines below where its hunk header says it goes.)
 *
 * If the patch was applied successfully, returns a string containing the patched text.
 * If the patch could not be applied (because some hunks in the patch couldn't be fitted to the text in `source`), `applyPatch` returns false.
 *
 * @param source the text to apply the patch to.
 * @param patch a string diff or the output from the `parsePatch` or `structuredPatch` methods.
 * @param options options controlling matching behaviour; see {@link ApplyPatchOptions}.
 * @returns the patched text, or `false` if the patch could not be applied.
 */
export declare function applyPatch(source: string, patch: string | StructuredPatch | [StructuredPatch], options?: ApplyPatchOptions): string | false;
|
||||
/**
 * Options for {@link applyPatches}; extends {@link ApplyPatchOptions} with
 * the callbacks used to load and store each file touched by the patch.
 */
export interface ApplyPatchesOptions extends ApplyPatchOptions {
    /**
     * Called to obtain the current contents of the file described by `index`;
     * the caller passes the contents (or an error) to `callback`.
     */
    loadFile: (index: StructuredPatch, callback: (err: any, data: string) => void) => void;
    /**
     * Called with the result of applying one patch (`false` if it failed to apply);
     * the caller signals completion (or an error) via `callback`.
     */
    patched: (index: StructuredPatch, content: string | false, callback: (err: any) => void) => void;
    /**
     * Called once when all patches have been processed, or as soon as an error occurs.
     */
    complete: (err?: any) => void;
}
|
||||
/**
 * applies one or more patches.
 *
 * `patch` may be either an array of structured patch objects, or a string representing a patch in unified diff format (which may patch one or more files).
 *
 * This method will iterate over the contents of the patch and apply to data provided through callbacks. The general flow for each patch index is:
 *
 * - `options.loadFile(index, callback)` is called. The caller should then load the contents of the file and then pass that to the `callback(err, data)` callback. Passing an `err` will terminate further patch execution.
 * - `options.patched(index, content, callback)` is called once the patch has been applied. `content` will be the return value from `applyPatch`. When it's ready, the caller should call `callback(err)` callback. Passing an `err` will terminate further patch execution.
 *
 * Once all patches have been applied or an error occurs, the `options.complete(err)` callback is made.
 *
 * @param uniDiff a unified-diff string or an array of structured patch objects.
 * @param options the file-access callbacks (and any {@link ApplyPatchOptions}) to use.
 */
export declare function applyPatches(uniDiff: string | StructuredPatch[], options: ApplyPatchesOptions): void;
|
||||
//# sourceMappingURL=apply.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/patch/apply.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/patch/apply.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"apply.d.ts","sourceRoot":"","sources":["../../src/patch/apply.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,aAAa,CAAC;AAEnD,MAAM,WAAW,iBAAiB;IAChC;;;OAGG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;;;OAIG;IACH,sBAAsB,CAAC,EAAE,OAAO,CAAC;IACjC;;;;OAIG;IACH,WAAW,CAAC,EAAE,CAAC,UAAU,EAAE,MAAM,EAAE,IAAI,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,EAAE,YAAY,EAAE,MAAM,KAAK,OAAO,CAAC;CACtG;AAOD;;;;;;;;;;;;;;;;;;;;;GAqBG;AACH,wBAAgB,UAAU,CACxB,MAAM,EAAE,MAAM,EACd,KAAK,EAAE,MAAM,GAAG,eAAe,GAAG,CAAC,eAAe,CAAC,EACnD,OAAO,GAAE,iBAAsB,GAC9B,MAAM,GAAG,KAAK,CAehB;AAyPD,MAAM,WAAW,mBAAoB,SAAQ,iBAAiB;IAC5D,QAAQ,EAAE,CAAC,KAAK,EAAE,eAAe,EAAE,QAAQ,EAAE,CAAC,GAAG,EAAE,GAAG,EAAE,IAAI,EAAE,MAAM,KAAK,IAAI,KAAK,IAAI,CAAC;IACvF,OAAO,EAAE,CAAC,KAAK,EAAE,eAAe,EAAE,OAAO,EAAE,MAAM,GAAG,KAAK,EAAE,QAAQ,EAAE,CAAC,GAAG,EAAE,GAAG,KAAK,IAAI,KAAK,IAAI,CAAC;IACjG,QAAQ,EAAE,CAAC,GAAG,CAAC,EAAE,GAAG,KAAK,IAAI,CAAC;CAC/B;AAED;;;;;;;;;;;GAWG;AACH,wBAAgB,YAAY,CAAC,OAAO,EAAE,MAAM,GAAG,eAAe,EAAE,EAAE,OAAO,EAAE,mBAAmB,GAAG,IAAI,CA0BpG"}
|
||||
257
Frontend-Learner/node_modules/diff/libesm/patch/apply.js
generated
vendored
Normal file
257
Frontend-Learner/node_modules/diff/libesm/patch/apply.js
generated
vendored
Normal file
|
|
@ -0,0 +1,257 @@
|
|||
import { hasOnlyWinLineEndings, hasOnlyUnixLineEndings } from '../util/string.js';
|
||||
import { isWin, isUnix, unixToWin, winToUnix } from './line-endings.js';
|
||||
import { parsePatch } from './parse.js';
|
||||
import distanceIterator from '../util/distance-iterator.js';
|
||||
/**
|
||||
* attempts to apply a unified diff patch.
|
||||
*
|
||||
* Hunks are applied first to last.
|
||||
* `applyPatch` first tries to apply the first hunk at the line number specified in the hunk header, and with all context lines matching exactly.
|
||||
* If that fails, it tries scanning backwards and forwards, one line at a time, to find a place to apply the hunk where the context lines match exactly.
|
||||
* If that still fails, and `fuzzFactor` is greater than zero, it increments the maximum number of mismatches (missing, extra, or changed context lines) that there can be between the hunk context and a region where we are trying to apply the patch such that the hunk will still be considered to match.
|
||||
* Regardless of `fuzzFactor`, lines to be deleted in the hunk *must* be present for a hunk to match, and the context lines *immediately* before and after an insertion must match exactly.
|
||||
*
|
||||
* Once a hunk is successfully fitted, the process begins again with the next hunk.
|
||||
* Regardless of `fuzzFactor`, later hunks must be applied later in the file than earlier hunks.
|
||||
*
|
||||
* If a hunk cannot be successfully fitted *anywhere* with fewer than `fuzzFactor` mismatches, `applyPatch` fails and returns `false`.
|
||||
*
|
||||
* If a hunk is successfully fitted but not at the line number specified by the hunk header, all subsequent hunks have their target line number adjusted accordingly.
|
||||
* (e.g. if the first hunk is applied 10 lines below where the hunk header said it should fit, `applyPatch` will *start* looking for somewhere to apply the second hunk 10 lines below where its hunk header says it goes.)
|
||||
*
|
||||
* If the patch was applied successfully, returns a string containing the patched text.
|
||||
* If the patch could not be applied (because some hunks in the patch couldn't be fitted to the text in `source`), `applyPatch` returns false.
|
||||
*
|
||||
* @param patch a string diff or the output from the `parsePatch` or `structuredPatch` methods.
|
||||
*/
|
||||
/**
 * Applies a unified diff patch to `source`.
 *
 * Accepts the patch as a diff string, a single structured patch object, or a
 * one-element array of structured patches; more than one patch is an error.
 *
 * @returns the patched text, or `false` if the patch could not be applied
 *   (see applyStructuredPatch).
 */
export function applyPatch(source, patch, options = {}) {
    // Normalise every accepted input shape into an array of structured patches.
    let patches;
    if (typeof patch === 'string') {
        patches = parsePatch(patch);
    } else {
        patches = Array.isArray(patch) ? patch : [patch];
    }
    if (patches.length > 1) {
        throw new Error('applyPatch only works with a single input.');
    }
    return applyStructuredPatch(source, patches[0], options);
}
|
||||
/**
 * Applies a single structured patch object to `source`.
 *
 * Returns the patched text as a string, or `false` if any hunk (or a
 * redundant/impossible end-of-file-newline change, with fuzzFactor 0) could
 * not be fitted to `source`.
 */
function applyStructuredPatch(source, patch, options = {}) {
    // Line-ending auto-conversion is on by default (the `== null` arm): if the
    // source consistently uses one newline convention and the patch the other,
    // rewrite the patch's line endings to match the source.
    if (options.autoConvertLineEndings || options.autoConvertLineEndings == null) {
        if (hasOnlyWinLineEndings(source) && isUnix(patch)) {
            patch = unixToWin(patch);
        }
        else if (hasOnlyUnixLineEndings(source) && isWin(patch)) {
            patch = winToUnix(patch);
        }
    }
    // Apply the diff to the input
    const lines = source.split('\n'), hunks = patch.hunks, compareLine = options.compareLine || ((lineNumber, line, operation, patchContent) => line === patchContent), fuzzFactor = options.fuzzFactor || 0;
    // Lowest line index the next hunk is allowed to touch (advanced as hunks apply).
    let minLine = 0;
    if (fuzzFactor < 0 || !Number.isInteger(fuzzFactor)) {
        throw new Error('fuzzFactor must be a non-negative integer');
    }
    // Special case for empty patch.
    if (!hunks.length) {
        return source;
    }
    // Before anything else, handle EOFNL insertion/removal. If the patch tells us to make a change
    // to the EOFNL that is redundant/impossible - i.e. to remove a newline that's not there, or add a
    // newline that already exists - then we either return false and fail to apply the patch (if
    // fuzzFactor is 0) or simply ignore the problem and do nothing (if fuzzFactor is >0).
    // If we do need to remove/add a newline at EOF, this will always be in the final hunk:
    // A '\' line ("\ No newline at end of file") after a '+' line means the
    // patched text has no trailing newline; after a '-' line it means the
    // original had none (so the patched text gains one).
    let prevLine = '', removeEOFNL = false, addEOFNL = false;
    for (let i = 0; i < hunks[hunks.length - 1].lines.length; i++) {
        const line = hunks[hunks.length - 1].lines[i];
        if (line[0] == '\\') {
            if (prevLine[0] == '+') {
                removeEOFNL = true;
            }
            else if (prevLine[0] == '-') {
                addEOFNL = true;
            }
        }
        prevLine = line;
    }
    if (removeEOFNL) {
        if (addEOFNL) {
            // This means the final line gets changed but doesn't have a trailing newline in either the
            // original or patched version. In that case, we do nothing if fuzzFactor > 0, and if
            // fuzzFactor is 0, we simply validate that the source file has no trailing newline.
            if (!fuzzFactor && lines[lines.length - 1] == '') {
                return false;
            }
        }
        else if (lines[lines.length - 1] == '') {
            lines.pop();
        }
        else if (!fuzzFactor) {
            return false;
        }
    }
    else if (addEOFNL) {
        if (lines[lines.length - 1] != '') {
            lines.push('');
        }
        else if (!fuzzFactor) {
            return false;
        }
    }
    /**
     * Checks if the hunk can be made to fit at the provided location with at most `maxErrors`
     * insertions, substitutions, or deletions, while ensuring also that:
     * - lines deleted in the hunk match exactly, and
     * - wherever an insertion operation or block of insertion operations appears in the hunk, the
     *   immediately preceding and following lines of context match exactly
     *
     * `toPos` should be set such that lines[toPos] is meant to match hunkLines[0].
     *
     * If the hunk can be applied, returns an object with properties `oldLineLastI` and
     * `replacementLines`. Otherwise, returns null.
     *
     * Closes over `lines` and `compareLine` from the enclosing scope; recurses
     * to try alternative alignments, decrementing `maxErrors` on each mismatch.
     */
    function applyHunk(hunkLines, toPos, maxErrors, hunkLinesI = 0, lastContextLineMatched = true, patchedLines = [], patchedLinesLength = 0) {
        let nConsecutiveOldContextLines = 0;
        let nextContextLineMustMatch = false;
        for (; hunkLinesI < hunkLines.length; hunkLinesI++) {
            const hunkLine = hunkLines[hunkLinesI], operation = (hunkLine.length > 0 ? hunkLine[0] : ' '), content = (hunkLine.length > 0 ? hunkLine.substr(1) : hunkLine);
            if (operation === '-') {
                if (compareLine(toPos + 1, lines[toPos], operation, content)) {
                    toPos++;
                    nConsecutiveOldContextLines = 0;
                }
                else {
                    // Deleted lines must be present; the only recovery (with errors
                    // left to spend) is to treat lines[toPos] as an extra line.
                    if (!maxErrors || lines[toPos] == null) {
                        return null;
                    }
                    patchedLines[patchedLinesLength] = lines[toPos];
                    return applyHunk(hunkLines, toPos + 1, maxErrors - 1, hunkLinesI, false, patchedLines, patchedLinesLength + 1);
                }
            }
            if (operation === '+') {
                // Insertions require the immediately-preceding context to have matched.
                if (!lastContextLineMatched) {
                    return null;
                }
                patchedLines[patchedLinesLength] = content;
                patchedLinesLength++;
                nConsecutiveOldContextLines = 0;
                nextContextLineMustMatch = true;
            }
            if (operation === ' ') {
                nConsecutiveOldContextLines++;
                patchedLines[patchedLinesLength] = lines[toPos];
                if (compareLine(toPos + 1, lines[toPos], operation, content)) {
                    patchedLinesLength++;
                    lastContextLineMatched = true;
                    nextContextLineMustMatch = false;
                    toPos++;
                }
                else {
                    if (nextContextLineMustMatch || !maxErrors) {
                        return null;
                    }
                    // Consider 3 possibilities in sequence:
                    // 1. lines contains a *substitution* not included in the patch context, or
                    // 2. lines contains an *insertion* not included in the patch context, or
                    // 3. lines contains a *deletion* not included in the patch context
                    // The first two options are of course only possible if the line from lines is non-null -
                    // i.e. only option 3 is possible if we've overrun the end of the old file.
                    return (lines[toPos] && (applyHunk(hunkLines, toPos + 1, maxErrors - 1, hunkLinesI + 1, false, patchedLines, patchedLinesLength + 1) || applyHunk(hunkLines, toPos + 1, maxErrors - 1, hunkLinesI, false, patchedLines, patchedLinesLength + 1)) || applyHunk(hunkLines, toPos, maxErrors - 1, hunkLinesI + 1, false, patchedLines, patchedLinesLength));
                }
            }
        }
        // Before returning, trim any unmodified context lines off the end of patchedLines and reduce
        // toPos (and thus oldLineLastI) accordingly. This allows later hunks to be applied to a region
        // that starts in this hunk's trailing context.
        patchedLinesLength -= nConsecutiveOldContextLines;
        toPos -= nConsecutiveOldContextLines;
        patchedLines.length = patchedLinesLength;
        return {
            patchedLines,
            oldLineLastI: toPos - 1
        };
    }
    const resultLines = [];
    // Search best fit offsets for each hunk based on the previous ones
    let prevHunkOffset = 0;
    for (let i = 0; i < hunks.length; i++) {
        const hunk = hunks[i];
        let hunkResult;
        const maxLine = lines.length - hunk.oldLines + fuzzFactor;
        let toPos;
        // Try progressively larger error budgets (0..fuzzFactor); for each budget,
        // scan outward from the expected position via distanceIterator.
        for (let maxErrors = 0; maxErrors <= fuzzFactor; maxErrors++) {
            toPos = hunk.oldStart + prevHunkOffset - 1;
            const iterator = distanceIterator(toPos, minLine, maxLine);
            for (; toPos !== undefined; toPos = iterator()) {
                hunkResult = applyHunk(hunk.lines, toPos, maxErrors);
                if (hunkResult) {
                    break;
                }
            }
            if (hunkResult) {
                break;
            }
        }
        if (!hunkResult) {
            return false;
        }
        // Copy everything from the end of where we applied the last hunk to the start of this hunk
        for (let i = minLine; i < toPos; i++) {
            resultLines.push(lines[i]);
        }
        // Add the lines produced by applying the hunk:
        for (let i = 0; i < hunkResult.patchedLines.length; i++) {
            const line = hunkResult.patchedLines[i];
            resultLines.push(line);
        }
        // Set lower text limit to end of the current hunk, so next ones don't try
        // to fit over already patched text
        minLine = hunkResult.oldLineLastI + 1;
        // Note the offset between where the patch said the hunk should've applied and where we
        // applied it, so we can adjust future hunks accordingly:
        prevHunkOffset = toPos + 1 - hunk.oldStart;
    }
    // Copy over the rest of the lines from the old text
    for (let i = minLine; i < lines.length; i++) {
        resultLines.push(lines[i]);
    }
    return resultLines.join('\n');
}
|
||||
/**
|
||||
* applies one or more patches.
|
||||
*
|
||||
* `patch` may be either an array of structured patch objects, or a string representing a patch in unified diff format (which may patch one or more files).
|
||||
*
|
||||
* This method will iterate over the contents of the patch and apply to data provided through callbacks. The general flow for each patch index is:
|
||||
*
|
||||
* - `options.loadFile(index, callback)` is called. The caller should then load the contents of the file and then pass that to the `callback(err, data)` callback. Passing an `err` will terminate further patch execution.
|
||||
* - `options.patched(index, content, callback)` is called once the patch has been applied. `content` will be the return value from `applyPatch`. When it's ready, the caller should call `callback(err)` callback. Passing an `err` will terminate further patch execution.
|
||||
*
|
||||
* Once all patches have been applied or an error occurs, the `options.complete(err)` callback is made.
|
||||
*/
|
||||
/**
 * Applies one or more patches, driving file access through the callbacks in
 * `options` (`loadFile`, `patched`, `complete`).
 *
 * `uniDiff` may be a unified-diff string (parsed with `parsePatch`) or an
 * array of structured patch objects. Patches are processed sequentially; any
 * error passed to a callback stops processing and is forwarded to
 * `options.complete`.
 */
export function applyPatches(uniDiff, options) {
    const patches = typeof uniDiff === 'string' ? parsePatch(uniDiff) : uniDiff;
    let cursor = 0;
    const applyNext = () => {
        const index = patches[cursor++];
        // Running off the end of the array means every patch has been handled.
        if (!index) {
            return options.complete();
        }
        options.loadFile(index, (err, data) => {
            if (err) {
                return options.complete(err);
            }
            const updatedContent = applyPatch(data, index, options);
            options.patched(index, updatedContent, (err2) => {
                if (err2) {
                    return options.complete(err2);
                }
                applyNext();
            });
        });
    };
    applyNext();
}
|
||||
100
Frontend-Learner/node_modules/diff/libesm/patch/create.d.ts
generated
vendored
Normal file
100
Frontend-Learner/node_modules/diff/libesm/patch/create.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,100 @@
|
|||
import type { StructuredPatch, DiffLinesOptionsAbortable, DiffLinesOptionsNonabortable, AbortableDiffOptions } from '../types.js';
/** Callback form used when the diff may be aborted (`patch` is `undefined` on abort). */
type StructuredPatchCallbackAbortable = (patch: StructuredPatch | undefined) => void;
/** Callback form used when the diff cannot be aborted. */
type StructuredPatchCallbackNonabortable = (patch: StructuredPatch) => void;
interface _StructuredPatchOptionsAbortable extends Pick<DiffLinesOptionsAbortable, 'ignoreWhitespace' | 'stripTrailingCr'> {
    /**
     * describes how many lines of context should be included.
     * You can set this to `Number.MAX_SAFE_INTEGER` or `Infinity` to include the entire file content in one hunk.
     * @default 4
     */
    context?: number;
    callback?: StructuredPatchCallbackAbortable;
}
export type StructuredPatchOptionsAbortable = _StructuredPatchOptionsAbortable & AbortableDiffOptions;
export interface StructuredPatchOptionsNonabortable extends Pick<DiffLinesOptionsNonabortable, 'ignoreWhitespace' | 'stripTrailingCr'> {
    // describes how many lines of context should be included
    // (presumably the same default, 4, as the abortable variant — confirm).
    context?: number;
    // Invoked with the computed patch when running in async mode.
    callback?: StructuredPatchCallbackNonabortable;
}
interface StructuredPatchCallbackOptionAbortable {
    /**
     * If provided, the diff will be computed in async mode to avoid blocking the event loop while the diff is calculated.
     * The value of the `callback` option should be a function and will be passed the computed diff or patch as its first argument.
     */
    callback: StructuredPatchCallbackAbortable;
}
interface StructuredPatchCallbackOptionNonabortable {
    /**
     * If provided, the diff will be computed in async mode to avoid blocking the event loop while the diff is calculated.
     * The value of the `callback` option should be a function and will be passed the computed diff or patch as its first argument.
     */
    callback: StructuredPatchCallbackNonabortable;
}
|
||||
/**
|
||||
* returns an object with an array of hunk objects.
|
||||
*
|
||||
* This method is similar to createTwoFilesPatch, but returns a data structure suitable for further processing.
|
||||
* @param oldFileName String to be output in the filename section of the patch for the removals
|
||||
* @param newFileName String to be output in the filename section of the patch for the additions
|
||||
* @param oldStr Original string value
|
||||
* @param newStr New string value
|
||||
* @param oldHeader Optional additional information to include in the old file header.
|
||||
* @param newHeader Optional additional information to include in the new file header.
|
||||
*/
|
||||
export declare function structuredPatch(oldFileName: string, newFileName: string, oldStr: string, newStr: string, oldHeader: string | undefined, newHeader: string | undefined, options: StructuredPatchCallbackNonabortable): undefined;
|
||||
export declare function structuredPatch(oldFileName: string, newFileName: string, oldStr: string, newStr: string, oldHeader: string | undefined, newHeader: string | undefined, options: StructuredPatchOptionsAbortable & StructuredPatchCallbackOptionAbortable): undefined;
|
||||
export declare function structuredPatch(oldFileName: string, newFileName: string, oldStr: string, newStr: string, oldHeader: string | undefined, newHeader: string | undefined, options: StructuredPatchOptionsNonabortable & StructuredPatchCallbackOptionNonabortable): undefined;
|
||||
export declare function structuredPatch(oldFileName: string, newFileName: string, oldStr: string, newStr: string, oldHeader: string | undefined, newHeader: string | undefined, options: StructuredPatchOptionsAbortable): StructuredPatch | undefined;
|
||||
export declare function structuredPatch(oldFileName: string, newFileName: string, oldStr: string, newStr: string, oldHeader?: string, newHeader?: string, options?: StructuredPatchOptionsNonabortable): StructuredPatch;
|
||||
/**
|
||||
* creates a unified diff patch.
|
||||
* @param patch either a single structured patch object (as returned by `structuredPatch`) or an array of them (as returned by `parsePatch`)
|
||||
*/
|
||||
export declare function formatPatch(patch: StructuredPatch | StructuredPatch[]): string;
|
||||
type CreatePatchCallbackAbortable = (patch: string | undefined) => void;
|
||||
type CreatePatchCallbackNonabortable = (patch: string) => void;
|
||||
interface _CreatePatchOptionsAbortable extends Pick<DiffLinesOptionsAbortable, 'ignoreWhitespace' | 'stripTrailingCr'> {
|
||||
context?: number;
|
||||
callback?: CreatePatchCallbackAbortable;
|
||||
}
|
||||
export type CreatePatchOptionsAbortable = _CreatePatchOptionsAbortable & AbortableDiffOptions;
|
||||
export interface CreatePatchOptionsNonabortable extends Pick<DiffLinesOptionsNonabortable, 'ignoreWhitespace' | 'stripTrailingCr'> {
|
||||
context?: number;
|
||||
callback?: CreatePatchCallbackNonabortable;
|
||||
}
|
||||
interface CreatePatchCallbackOptionAbortable {
|
||||
callback: CreatePatchCallbackAbortable;
|
||||
}
|
||||
interface CreatePatchCallbackOptionNonabortable {
|
||||
callback: CreatePatchCallbackNonabortable;
|
||||
}
|
||||
/**
|
||||
* creates a unified diff patch by first computing a diff with `diffLines` and then serializing it to unified diff format.
|
||||
* @param oldFileName String to be output in the filename section of the patch for the removals
|
||||
* @param newFileName String to be output in the filename section of the patch for the additions
|
||||
* @param oldStr Original string value
|
||||
* @param newStr New string value
|
||||
* @param oldHeader Optional additional information to include in the old file header.
|
||||
* @param newHeader Optional additional information to include in the new file header.
|
||||
*/
|
||||
export declare function createTwoFilesPatch(oldFileName: string, newFileName: string, oldStr: string, newStr: string, oldHeader: string | undefined, newHeader: string | undefined, options: CreatePatchCallbackNonabortable): undefined;
|
||||
export declare function createTwoFilesPatch(oldFileName: string, newFileName: string, oldStr: string, newStr: string, oldHeader: string | undefined, newHeader: string | undefined, options: CreatePatchOptionsAbortable & CreatePatchCallbackOptionAbortable): undefined;
|
||||
export declare function createTwoFilesPatch(oldFileName: string, newFileName: string, oldStr: string, newStr: string, oldHeader: string | undefined, newHeader: string | undefined, options: CreatePatchOptionsNonabortable & CreatePatchCallbackOptionNonabortable): undefined;
|
||||
export declare function createTwoFilesPatch(oldFileName: string, newFileName: string, oldStr: string, newStr: string, oldHeader: string | undefined, newHeader: string | undefined, options: CreatePatchOptionsAbortable): string | undefined;
|
||||
export declare function createTwoFilesPatch(oldFileName: string, newFileName: string, oldStr: string, newStr: string, oldHeader?: string, newHeader?: string, options?: CreatePatchOptionsNonabortable): string;
|
||||
/**
|
||||
* creates a unified diff patch.
|
||||
*
|
||||
* Just like createTwoFilesPatch, but with oldFileName being equal to newFileName.
|
||||
* @param fileName String to be output in the filename section of the patch
|
||||
* @param oldStr Original string value
|
||||
* @param newStr New string value
|
||||
* @param oldHeader Optional additional information to include in the old file header.
|
||||
* @param newHeader Optional additional information to include in the new file header.
|
||||
*/
|
||||
export declare function createPatch(fileName: string, oldStr: string, newStr: string, oldHeader: string | undefined, newHeader: string | undefined, options: CreatePatchCallbackNonabortable): undefined;
|
||||
export declare function createPatch(fileName: string, oldStr: string, newStr: string, oldHeader: string | undefined, newHeader: string | undefined, options: CreatePatchOptionsAbortable & CreatePatchCallbackOptionAbortable): undefined;
|
||||
export declare function createPatch(fileName: string, oldStr: string, newStr: string, oldHeader: string | undefined, newHeader: string | undefined, options: CreatePatchOptionsNonabortable & CreatePatchCallbackOptionNonabortable): undefined;
|
||||
export declare function createPatch(fileName: string, oldStr: string, newStr: string, oldHeader: string | undefined, newHeader: string | undefined, options: CreatePatchOptionsAbortable): string | undefined;
|
||||
export declare function createPatch(fileName: string, oldStr: string, newStr: string, oldHeader?: string, newHeader?: string, options?: CreatePatchOptionsNonabortable): string;
|
||||
export {};
|
||||
//# sourceMappingURL=create.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/patch/create.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/patch/create.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"create.d.ts","sourceRoot":"","sources":["../../src/patch/create.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,eAAe,EAAE,yBAAyB,EAAE,4BAA4B,EAAE,oBAAoB,EAAgB,MAAM,aAAa,CAAC;AAEhJ,KAAK,gCAAgC,GAAG,CAAC,KAAK,EAAE,eAAe,GAAG,SAAS,KAAK,IAAI,CAAC;AACrF,KAAK,mCAAmC,GAAG,CAAC,KAAK,EAAE,eAAe,KAAK,IAAI,CAAC;AAE5E,UAAU,gCAAiC,SAAQ,IAAI,CAAC,yBAAyB,EAAE,kBAAkB,GAAG,iBAAiB,CAAC;IACxH;;;;OAIG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,QAAQ,CAAC,EAAE,gCAAgC,CAAC;CAC7C;AACD,MAAM,MAAM,+BAA+B,GAAG,gCAAgC,GAAG,oBAAoB,CAAC;AACtG,MAAM,WAAW,kCAAmC,SAAQ,IAAI,CAAC,4BAA4B,EAAE,kBAAkB,GAAG,iBAAiB,CAAC;IACpI,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,QAAQ,CAAC,EAAE,mCAAmC,CAAC;CAChD;AACD,UAAU,sCAAsC;IAC9C;;;OAGG;IACH,QAAQ,EAAE,gCAAgC,CAAC;CAC5C;AACD,UAAU,yCAAyC;IACjD;;;OAGG;IACH,QAAQ,EAAE,mCAAmC,CAAC;CAC/C;AASD;;;;;;;;;;GAUG;AACH,wBAAgB,eAAe,CAC7B,WAAW,EAAE,MAAM,EACnB,WAAW,EAAE,MAAM,EACnB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,OAAO,EAAE,mCAAmC,GAC3C,SAAS,CAAC;AACb,wBAAgB,eAAe,CAC7B,WAAW,EAAE,MAAM,EACnB,WAAW,EAAE,MAAM,EACnB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,OAAO,EAAE,+BAA+B,GAAG,sCAAsC,GAChF,SAAS,CAAA;AACZ,wBAAgB,eAAe,CAC7B,WAAW,EAAE,MAAM,EACnB,WAAW,EAAE,MAAM,EACnB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,OAAO,EAAE,kCAAkC,GAAG,yCAAyC,GACtF,SAAS,CAAA;AACZ,wBAAgB,eAAe,CAC7B,WAAW,EAAE,MAAM,EACnB,WAAW,EAAE,MAAM,EACnB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,OAAO,EAAE,+BAA+B,GACvC,eAAe,GAAG,SAAS,CAAA;AAC9B,wBAAgB,eAAe,CAC7B,WAAW,EAAE,MAAM,EACnB,WAAW,EAAE,MAAM,EACnB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,SAAS,CAAC,EAAE,MAAM,EAClB,SAAS,CAAC,EAAE,MAAM,EAClB,OAAO,CAAC,EAAE,kCAAkC,GAC3C,eAAe,CAAA;AA2JlB;;;GAGG;AACH,wBAAgB,WAAW,CAAC,KAAK,EAAE,eAAe,GAAG,eAAe,EAAE,GAAG,MAAM,CAmC9E;AAED,KAAK,4BAA4B,GAAG,CAAC,KAAK,EAAE,MAAM,GAAG,SAAS,KAAK,IAAI,CAAC;AACxE,K
AAK,+BAA+B,GAAG,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,CAAC;AAE/D,UAAU,4BAA6B,SAAQ,IAAI,CAAC,yBAAyB,EAAE,kBAAkB,GAAG,iBAAiB,CAAC;IACpH,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,QAAQ,CAAC,EAAE,4BAA4B,CAAC;CACzC;AACD,MAAM,MAAM,2BAA2B,GAAG,4BAA4B,GAAG,oBAAoB,CAAC;AAC9F,MAAM,WAAW,8BAA+B,SAAQ,IAAI,CAAC,4BAA4B,EAAE,kBAAkB,GAAG,iBAAiB,CAAC;IAChI,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,QAAQ,CAAC,EAAE,+BAA+B,CAAC;CAC5C;AACD,UAAU,kCAAkC;IAC1C,QAAQ,EAAE,4BAA4B,CAAC;CACxC;AACD,UAAU,qCAAqC;IAC7C,QAAQ,EAAE,+BAA+B,CAAC;CAC3C;AAED;;;;;;;;GAQG;AACH,wBAAgB,mBAAmB,CACjC,WAAW,EAAE,MAAM,EACnB,WAAW,EAAE,MAAM,EACnB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,OAAO,EAAE,+BAA+B,GACvC,SAAS,CAAC;AACb,wBAAgB,mBAAmB,CACjC,WAAW,EAAE,MAAM,EACnB,WAAW,EAAE,MAAM,EACnB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,OAAO,EAAE,2BAA2B,GAAG,kCAAkC,GACxE,SAAS,CAAA;AACZ,wBAAgB,mBAAmB,CACjC,WAAW,EAAE,MAAM,EACnB,WAAW,EAAE,MAAM,EACnB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,OAAO,EAAE,8BAA8B,GAAG,qCAAqC,GAC9E,SAAS,CAAA;AACZ,wBAAgB,mBAAmB,CACjC,WAAW,EAAE,MAAM,EACnB,WAAW,EAAE,MAAM,EACnB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,OAAO,EAAE,2BAA2B,GACnC,MAAM,GAAG,SAAS,CAAA;AACrB,wBAAgB,mBAAmB,CACjC,WAAW,EAAE,MAAM,EACnB,WAAW,EAAE,MAAM,EACnB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,SAAS,CAAC,EAAE,MAAM,EAClB,SAAS,CAAC,EAAE,MAAM,EAClB,OAAO,CAAC,EAAE,8BAA8B,GACvC,MAAM,CAAA;AA2CT;;;;;;;;;GASG;AACH,wBAAgB,WAAW,CACzB,QAAQ,EAAE,MAAM,EAChB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,OAAO,EAAE,+BAA+B,GACvC,SAAS,CAAC;AACb,wBAAgB,WAAW,CACzB,QAAQ,EAAE,MAAM,EAChB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,OAAO,EAAE,2BAA2B,GAAG,kCAAkC,GACxE,SAAS,CAAA;AACZ,wBAAgB,WAAW,CACzB,QAAQ,EAAE,MAAM,EAChB,MAAM,EAAE,MAAM,EACd,MAA
M,EAAE,MAAM,EACd,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,OAAO,EAAE,8BAA8B,GAAG,qCAAqC,GAC9E,SAAS,CAAA;AACZ,wBAAgB,WAAW,CACzB,QAAQ,EAAE,MAAM,EAChB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,SAAS,EAAE,MAAM,GAAG,SAAS,EAC7B,OAAO,EAAE,2BAA2B,GACnC,MAAM,GAAG,SAAS,CAAA;AACrB,wBAAgB,WAAW,CACzB,QAAQ,EAAE,MAAM,EAChB,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,EACd,SAAS,CAAC,EAAE,MAAM,EAClB,SAAS,CAAC,EAAE,MAAM,EAClB,OAAO,CAAC,EAAE,8BAA8B,GACvC,MAAM,CAAA"}
|
||||
201
Frontend-Learner/node_modules/diff/libesm/patch/create.js
generated
vendored
Normal file
201
Frontend-Learner/node_modules/diff/libesm/patch/create.js
generated
vendored
Normal file
|
|
@ -0,0 +1,201 @@
|
|||
import { diffLines } from '../diff/line.js';
|
||||
/**
 * Computes a line diff between `oldStr` and `newStr` and converts it into a
 * structured patch object ({ oldFileName, newFileName, oldHeader, newHeader,
 * hunks }). `options` may be omitted, a bare callback, or an options object;
 * when a callback is supplied the patch is delivered asynchronously and this
 * function returns undefined.
 */
export function structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options) {
    // Normalize `options`: missing -> {}, bare function -> { callback }, object -> as-is.
    let optionsObj;
    if (!options) {
        optionsObj = {};
    }
    else if (typeof options === 'function') {
        optionsObj = { callback: options };
    }
    else {
        optionsObj = options;
    }
    // Default number of unchanged context lines around each hunk.
    if (typeof optionsObj.context === 'undefined') {
        optionsObj.context = 4;
    }
    // We copy this into its own variable to placate TypeScript, which thinks
    // optionsObj.context might be undefined in the callbacks below.
    const context = optionsObj.context;
    // @ts-expect-error (runtime check for something that is correctly a static type error)
    if (optionsObj.newlineIsToken) {
        throw new Error('newlineIsToken may not be used with patch-generation functions, only with diffing functions');
    }
    if (!optionsObj.callback) {
        // Synchronous path: diff now, convert now.
        return diffLinesResultToPatch(diffLines(oldStr, newStr, optionsObj));
    }
    else {
        // Asynchronous path: forward our own converting callback to diffLines.
        const { callback } = optionsObj;
        diffLines(oldStr, newStr, Object.assign(Object.assign({}, optionsObj), { callback: (diff) => {
                const patch = diffLinesResultToPatch(diff);
                // TypeScript is unhappy without the cast because it does not understand that `patch` may
                // be undefined here only if `callback` is StructuredPatchCallbackAbortable:
                callback(patch);
            } }));
    }
    // Converts the change-object list produced by diffLines into hunks.
    // Returns undefined when `diff` is undefined (aborted diff).
    function diffLinesResultToPatch(diff) {
        // STEP 1: Build up the patch with no "\ No newline at end of file" lines and with the arrays
        // of lines containing trailing newline characters. We'll tidy up later...
        if (!diff) {
            return;
        }
        diff.push({ value: '', lines: [] }); // Append an empty value to make cleanup easier
        // Prefix each unchanged line with the unified-diff context marker (a space).
        function contextLines(lines) {
            return lines.map(function (entry) { return ' ' + entry; });
        }
        const hunks = [];
        // oldRangeStart doubles as the "currently inside a hunk" flag (0 = not in a hunk).
        let oldRangeStart = 0, newRangeStart = 0, curRange = [], oldLine = 1, newLine = 1;
        for (let i = 0; i < diff.length; i++) {
            const current = diff[i], lines = current.lines || splitLines(current.value);
            current.lines = lines;
            if (current.added || current.removed) {
                // If we have previous context, start with that
                if (!oldRangeStart) {
                    const prev = diff[i - 1];
                    oldRangeStart = oldLine;
                    newRangeStart = newLine;
                    if (prev) {
                        // Seed the hunk with up to `context` trailing lines of the
                        // preceding unchanged block, and back up the start positions
                        // accordingly.
                        curRange = context > 0 ? contextLines(prev.lines.slice(-context)) : [];
                        oldRangeStart -= curRange.length;
                        newRangeStart -= curRange.length;
                    }
                }
                // Output our changes
                for (const line of lines) {
                    curRange.push((current.added ? '+' : '-') + line);
                }
                // Track the updated file position
                if (current.added) {
                    newLine += lines.length;
                }
                else {
                    oldLine += lines.length;
                }
            }
            else {
                // Identical context lines. Track line changes
                if (oldRangeStart) {
                    // Close out any changes that have been output (or join overlapping)
                    if (lines.length <= context * 2 && i < diff.length - 2) {
                        // Overlapping
                        for (const line of contextLines(lines)) {
                            curRange.push(line);
                        }
                    }
                    else {
                        // end the range and output
                        const contextSize = Math.min(lines.length, context);
                        for (const line of contextLines(lines.slice(0, contextSize))) {
                            curRange.push(line);
                        }
                        const hunk = {
                            oldStart: oldRangeStart,
                            oldLines: (oldLine - oldRangeStart + contextSize),
                            newStart: newRangeStart,
                            newLines: (newLine - newRangeStart + contextSize),
                            lines: curRange
                        };
                        hunks.push(hunk);
                        // Reset hunk state for the next run of changes.
                        oldRangeStart = 0;
                        newRangeStart = 0;
                        curRange = [];
                    }
                }
                oldLine += lines.length;
                newLine += lines.length;
            }
        }
        // Step 2: eliminate the trailing `\n` from each line of each hunk, and, where needed, add
        // "\ No newline at end of file".
        for (const hunk of hunks) {
            for (let i = 0; i < hunk.lines.length; i++) {
                if (hunk.lines[i].endsWith('\n')) {
                    hunk.lines[i] = hunk.lines[i].slice(0, -1);
                }
                else {
                    hunk.lines.splice(i + 1, 0, '\\ No newline at end of file');
                    i++; // Skip the line we just added, then continue iterating
                }
            }
        }
        return {
            oldFileName: oldFileName, newFileName: newFileName,
            oldHeader: oldHeader, newHeader: newHeader,
            hunks: hunks
        };
    }
}
|
||||
/**
 * creates a unified diff patch.
 *
 * Fix: the "chunk size 0" display adjustment is now computed into locals
 * instead of mutating `hunk.oldStart` / `hunk.newStart` in place. Previously,
 * calling formatPatch twice on the same patch object decremented the starts
 * twice, producing a different (wrong) header on every subsequent call.
 * @param patch either a single structured patch object (as returned by `structuredPatch`) or an array of them (as returned by `parsePatch`)
 * @returns the serialized unified-diff text, ending with a newline
 */
export function formatPatch(patch) {
    if (Array.isArray(patch)) {
        return patch.map(formatPatch).join('\n');
    }
    const ret = [];
    // Loose equality kept intentionally: treats null/undefined file names as equal.
    if (patch.oldFileName == patch.newFileName) {
        ret.push('Index: ' + patch.oldFileName);
    }
    ret.push('===================================================================');
    ret.push('--- ' + patch.oldFileName + (typeof patch.oldHeader === 'undefined' ? '' : '\t' + patch.oldHeader));
    ret.push('+++ ' + patch.newFileName + (typeof patch.newHeader === 'undefined' ? '' : '\t' + patch.newHeader));
    for (let i = 0; i < patch.hunks.length; i++) {
        const hunk = patch.hunks[i];
        // Unified Diff Format quirk: If the chunk size is 0,
        // the first number is one lower than one would expect.
        // https://www.artima.com/weblogs/viewpost.jsp?thread=164293
        // Computed into locals so the caller's hunk objects are never mutated.
        const displayOldStart = hunk.oldLines === 0 ? hunk.oldStart - 1 : hunk.oldStart;
        const displayNewStart = hunk.newLines === 0 ? hunk.newStart - 1 : hunk.newStart;
        ret.push('@@ -' + displayOldStart + ',' + hunk.oldLines
            + ' +' + displayNewStart + ',' + hunk.newLines
            + ' @@');
        for (const line of hunk.lines) {
            ret.push(line);
        }
    }
    return ret.join('\n') + '\n';
}
|
||||
/**
 * Creates a unified-diff patch string comparing two named files. `options` may
 * be omitted, a bare callback, or an options object; with a callback the
 * result is delivered asynchronously and this function returns undefined.
 */
export function createTwoFilesPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options) {
    // A bare function is shorthand for { callback }.
    if (typeof options === 'function') {
        options = { callback: options };
    }
    if (!options?.callback) {
        // Synchronous path: build the structured patch, then serialize it.
        const patchObj = structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options);
        return patchObj ? formatPatch(patchObj) : undefined;
    }
    // Asynchronous path: wrap the caller's callback so it receives the
    // serialized patch (or undefined when the diff was aborted).
    const { callback } = options;
    structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, {
        ...options,
        callback: patchObj => {
            callback(patchObj ? formatPatch(patchObj) : undefined);
        }
    });
}
|
||||
// Convenience wrapper around `createTwoFilesPatch` for the common case where
// the old and new content share a single file name.
export function createPatch(fileName, oldStr, newStr, oldHeader, newHeader, options) {
    return createTwoFilesPatch(fileName, fileName, oldStr, newStr, oldHeader, newHeader, options);
}
|
||||
/**
 * Split `text` into an array of lines, where every line except possibly the
 * last keeps its trailing newline character. An input with no final newline
 * yields a final element without one; the empty string yields [''].
 */
function splitLines(text) {
    const pieces = text.split('\n');
    // The element after the final '\n' (or the whole string if there is none).
    const tail = pieces.pop();
    const out = pieces.map(line => line + '\n');
    // Keep a newline-less tail; also keep the '' tail when the input was empty
    // so that '' maps to [''] rather than [].
    if (tail !== '' || out.length === 0) {
        out.push(tail);
    }
    return out;
}
|
||||
17
Frontend-Learner/node_modules/diff/libesm/patch/line-endings.d.ts
generated
vendored
Normal file
17
Frontend-Learner/node_modules/diff/libesm/patch/line-endings.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
import type { StructuredPatch } from '../types.js';
/**
 * Returns a copy of the patch (or patches) with Windows (CRLF) line endings:
 * a trailing `\r` is appended to each hunk line unless the line already ends
 * with `\r`, is a `\` marker line, or is followed by a `\ No newline...` marker.
 */
export declare function unixToWin(patch: StructuredPatch): StructuredPatch;
export declare function unixToWin(patches: StructuredPatch[]): StructuredPatch[];
export declare function unixToWin(patch: StructuredPatch | StructuredPatch[]): StructuredPatch | StructuredPatch[];
/**
 * Returns a copy of the patch (or patches) with Unix (LF) line endings: any
 * trailing `\r` is stripped from each hunk line.
 */
export declare function winToUnix(patch: StructuredPatch): StructuredPatch;
export declare function winToUnix(patches: StructuredPatch[]): StructuredPatch[];
export declare function winToUnix(patch: StructuredPatch | StructuredPatch[]): StructuredPatch | StructuredPatch[];
/**
 * Returns true if the patch consistently uses Unix line endings (or only involves one line and has
 * no line endings).
 */
export declare function isUnix(patch: StructuredPatch | StructuredPatch[]): boolean;
/**
 * Returns true if the patch uses Windows line endings and only Windows line endings.
 */
export declare function isWin(patch: StructuredPatch | StructuredPatch[]): boolean;
//# sourceMappingURL=line-endings.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/patch/line-endings.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/patch/line-endings.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"line-endings.d.ts","sourceRoot":"","sources":["../../src/patch/line-endings.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,aAAa,CAAC;AAEnD,wBAAgB,SAAS,CAAC,KAAK,EAAE,eAAe,GAAG,eAAe,CAAC;AACnE,wBAAgB,SAAS,CAAC,OAAO,EAAE,eAAe,EAAE,GAAG,eAAe,EAAE,CAAC;AACzE,wBAAgB,SAAS,CAAC,KAAK,EAAE,eAAe,GAAG,eAAe,EAAE,GAAG,eAAe,GAAG,eAAe,EAAE,CAAC;AA0B3G,wBAAgB,SAAS,CAAC,KAAK,EAAE,eAAe,GAAG,eAAe,CAAC;AACnE,wBAAgB,SAAS,CAAC,OAAO,EAAE,eAAe,EAAE,GAAG,eAAe,EAAE,CAAC;AACzE,wBAAgB,SAAS,CAAC,KAAK,EAAE,eAAe,GAAG,eAAe,EAAE,GAAG,eAAe,GAAG,eAAe,EAAE,CAAC;AAgB3G;;;GAGG;AACH,wBAAgB,MAAM,CAAC,KAAK,EAAE,eAAe,GAAG,eAAe,EAAE,GAAG,OAAO,CAS1E;AAED;;GAEG;AACH,wBAAgB,KAAK,CAAC,KAAK,EAAE,eAAe,GAAG,eAAe,EAAE,GAAG,OAAO,CAUzE"}
|
||||
44
Frontend-Learner/node_modules/diff/libesm/patch/line-endings.js
generated
vendored
Normal file
44
Frontend-Learner/node_modules/diff/libesm/patch/line-endings.js
generated
vendored
Normal file
|
|
@ -0,0 +1,44 @@
|
|||
/**
 * Returns a copy of `patch` (or an array of patches) converted to Windows
 * (CRLF) line endings. A line is left untouched when it is a `\` marker line,
 * already ends with `\r`, or is immediately followed by a
 * `\ No newline at end of file` marker.
 */
export function unixToWin(patch) {
    if (Array.isArray(patch)) {
        // It would be cleaner if instead of the line below we could just write
        // return patch.map(unixToWin)
        // but mysteriously TypeScript (v5.7.3 at the time of writing) does not like this and it will
        // refuse to compile, thinking that unixToWin could then return StructuredPatch[][] and the
        // result would be incompatible with the overload signatures.
        // See bug report at https://github.com/microsoft/TypeScript/issues/61398.
        return patch.map(p => unixToWin(p));
    }
    // Appends '\r' unless the line must be left alone (see doc comment).
    const toCrlf = (line, idx, allLines) => {
        const keepAsIs = line.startsWith('\\')
            || line.endsWith('\r')
            || allLines[idx + 1]?.startsWith('\\');
        return keepAsIs ? line : line + '\r';
    };
    return {
        ...patch,
        hunks: patch.hunks.map(hunk => ({ ...hunk, lines: hunk.lines.map(toCrlf) }))
    };
}
|
||||
/**
 * Returns a copy of `patch` (or an array of patches) converted to Unix (LF)
 * line endings by stripping any trailing `\r` from each hunk line.
 */
export function winToUnix(patch) {
    if (Array.isArray(patch)) {
        // (See comment above equivalent line in unixToWin)
        return patch.map(p => winToUnix(p));
    }
    const stripCr = line => (line.endsWith('\r') ? line.slice(0, -1) : line);
    return {
        ...patch,
        hunks: patch.hunks.map(hunk => ({ ...hunk, lines: hunk.lines.map(stripCr) }))
    };
}
|
||||
/**
 * Returns true if the patch consistently uses Unix line endings (or only involves one line and has
 * no line endings). `\` marker lines are ignored.
 */
export function isUnix(patch) {
    const patches = Array.isArray(patch) ? patch : [patch];
    // Every non-marker line in every hunk must lack a trailing '\r'.
    return patches.every(index =>
        index.hunks.every(hunk =>
            hunk.lines.every(line => line.startsWith('\\') || !line.endsWith('\r'))));
}
|
||||
/**
 * Returns true if the patch uses Windows line endings and only Windows line endings.
 * Lines that are `\` markers, or that precede a `\ No newline...` marker, are
 * exempt from the CRLF requirement.
 */
export function isWin(patch) {
    const patches = Array.isArray(patch) ? patch : [patch];
    // There must be at least one CRLF line somewhere...
    const anyCrlf = patches.some(index =>
        index.hunks.some(hunk => hunk.lines.some(line => line.endsWith('\r'))));
    if (!anyCrlf) {
        return false;
    }
    // ...and every line must either end with '\r' or be exempt.
    return patches.every(index =>
        index.hunks.every(hunk =>
            hunk.lines.every((line, idx) =>
                line.startsWith('\\')
                || line.endsWith('\r')
                || hunk.lines[idx + 1]?.startsWith('\\'))));
}
|
||||
8
Frontend-Learner/node_modules/diff/libesm/patch/parse.d.ts
generated
vendored
Normal file
8
Frontend-Learner/node_modules/diff/libesm/patch/parse.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
import type { StructuredPatch } from '../types.js';
/**
 * Parses a patch into structured data, in the same structure returned by `structuredPatch`.
 *
 * @return a JSON object representation of the patch, suitable for use with the `applyPatch` method.
 */
export declare function parsePatch(uniDiff: string): StructuredPatch[];
//# sourceMappingURL=parse.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/patch/parse.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/patch/parse.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"parse.d.ts","sourceRoot":"","sources":["../../src/patch/parse.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,aAAa,CAAC;AAEnD;;;;GAIG;AACH,wBAAgB,UAAU,CAAC,OAAO,EAAE,MAAM,GAAG,eAAe,EAAE,CAiJ7D"}
|
||||
130
Frontend-Learner/node_modules/diff/libesm/patch/parse.js
generated
vendored
Normal file
130
Frontend-Learner/node_modules/diff/libesm/patch/parse.js
generated
vendored
Normal file
|
|
@ -0,0 +1,130 @@
|
|||
/**
 * Parses a patch into structured data, in the same structure returned by `structuredPatch`.
 *
 * @return a JSON object representation of the patch, suitable for use with the `applyPatch` method.
 */
export function parsePatch(uniDiff) {
    // `i` is a cursor into `diffstr` shared by all the nested helpers below;
    // each helper consumes lines by advancing it.
    const diffstr = uniDiff.split(/\n/), list = [];
    let i = 0;
    // Parses one file's section of the diff (metadata, file headers, hunks)
    // and appends the resulting index object to `list`.
    function parseIndex() {
        const index = {};
        list.push(index);
        // Parse diff metadata
        while (i < diffstr.length) {
            const line = diffstr[i];
            // File header found, end parsing diff metadata
            if ((/^(---|\+\+\+|@@)\s/).test(line)) {
                break;
            }
            // Diff index
            const header = (/^(?:Index:|diff(?: -r \w+)+)\s+(.+?)\s*$/).exec(line);
            if (header) {
                index.index = header[1];
            }
            i++;
        }
        // Parse file headers if they are defined. Unified diff requires them, but
        // there's no technical issues to have an isolated hunk without file header
        // (called twice: once for the '---' line, once for the '+++' line)
        parseFileHeader(index);
        parseFileHeader(index);
        // Parse hunks
        index.hunks = [];
        while (i < diffstr.length) {
            const line = diffstr[i];
            // A new file section starts here; hand control back to the outer loop.
            if ((/^(Index:\s|diff\s|---\s|\+\+\+\s|===================================================================)/).test(line)) {
                break;
            }
            else if ((/^@@/).test(line)) {
                index.hunks.push(parseHunk());
            }
            else if (line) {
                throw new Error('Unknown line ' + (i + 1) + ' ' + JSON.stringify(line));
            }
            else {
                // Blank line between hunks; skip it.
                i++;
            }
        }
    }
    // Parses the --- and +++ headers, if none are found, no lines
    // are consumed.
    function parseFileHeader(index) {
        const fileHeader = (/^(---|\+\+\+)\s+(.*)\r?$/).exec(diffstr[i]);
        if (fileHeader) {
            // The header text after the marker is "<fileName>\t<extra header>".
            const data = fileHeader[2].split('\t', 2), header = (data[1] || '').trim();
            let fileName = data[0].replace(/\\\\/g, '\\');
            // Strip surrounding quotes from quoted file names.
            if ((/^".*"$/).test(fileName)) {
                fileName = fileName.substr(1, fileName.length - 2);
            }
            if (fileHeader[1] === '---') {
                index.oldFileName = fileName;
                index.oldHeader = header;
            }
            else {
                index.newFileName = fileName;
                index.newHeader = header;
            }
            i++;
        }
    }
    // Parses a hunk
    // This assumes that we are at the start of a hunk.
    function parseHunk() {
        var _a;
        const chunkHeaderIndex = i, chunkHeaderLine = diffstr[i++], chunkHeader = chunkHeaderLine.split(/@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@/);
        // A missing count in "@@ -a[,b] +c[,d] @@" means 1 line.
        const hunk = {
            oldStart: +chunkHeader[1],
            oldLines: typeof chunkHeader[2] === 'undefined' ? 1 : +chunkHeader[2],
            newStart: +chunkHeader[3],
            newLines: typeof chunkHeader[4] === 'undefined' ? 1 : +chunkHeader[4],
            lines: []
        };
        // Unified Diff Format quirk: If the chunk size is 0,
        // the first number is one lower than one would expect.
        // https://www.artima.com/weblogs/viewpost.jsp?thread=164293
        if (hunk.oldLines === 0) {
            hunk.oldStart += 1;
        }
        if (hunk.newLines === 0) {
            hunk.newStart += 1;
        }
        let addCount = 0, removeCount = 0;
        // Consume body lines until both expected counts are satisfied; the extra
        // `startsWith('\\')` clause also pulls in a trailing
        // "\ No newline at end of file" marker that follows the counted lines.
        for (; i < diffstr.length && (removeCount < hunk.oldLines || addCount < hunk.newLines || ((_a = diffstr[i]) === null || _a === void 0 ? void 0 : _a.startsWith('\\'))); i++) {
            // An empty line (other than a final one produced by a trailing newline
            // in the input) is treated as a context line whose content was stripped.
            const operation = (diffstr[i].length == 0 && i != (diffstr.length - 1)) ? ' ' : diffstr[i][0];
            if (operation === '+' || operation === '-' || operation === ' ' || operation === '\\') {
                hunk.lines.push(diffstr[i]);
                if (operation === '+') {
                    addCount++;
                }
                else if (operation === '-') {
                    removeCount++;
                }
                else if (operation === ' ') {
                    addCount++;
                    removeCount++;
                }
            }
            else {
                throw new Error(`Hunk at line ${chunkHeaderIndex + 1} contained invalid line ${diffstr[i]}`);
            }
        }
        // Handle the empty block count case
        if (!addCount && hunk.newLines === 1) {
            hunk.newLines = 0;
        }
        if (!removeCount && hunk.oldLines === 1) {
            hunk.oldLines = 0;
        }
        // Perform sanity checking
        if (addCount !== hunk.newLines) {
            throw new Error('Added line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
        }
        if (removeCount !== hunk.oldLines) {
            throw new Error('Removed line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
        }
        return hunk;
    }
    // Top level: keep parsing file sections until the input is exhausted.
    while (i < diffstr.length) {
        parseIndex();
    }
    return list;
}
|
||||
9
Frontend-Learner/node_modules/diff/libesm/patch/reverse.d.ts
generated
vendored
Normal file
9
Frontend-Learner/node_modules/diff/libesm/patch/reverse.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
import type { StructuredPatch } from '../types.js';
|
||||
/**
|
||||
* @param patch either a single structured patch object (as returned by `structuredPatch`) or an array of them (as returned by `parsePatch`).
|
||||
* @returns a new structured patch which when applied will undo the original `patch`.
|
||||
*/
|
||||
export declare function reversePatch(structuredPatch: StructuredPatch): StructuredPatch;
|
||||
export declare function reversePatch(structuredPatch: StructuredPatch[]): StructuredPatch[];
|
||||
export declare function reversePatch(structuredPatch: StructuredPatch | StructuredPatch[]): StructuredPatch | StructuredPatch[];
|
||||
//# sourceMappingURL=reverse.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/patch/reverse.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/patch/reverse.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"reverse.d.ts","sourceRoot":"","sources":["../../src/patch/reverse.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,aAAa,CAAC;AAEnD;;;GAGG;AACH,wBAAgB,YAAY,CAAC,eAAe,EAAE,eAAe,GAAG,eAAe,CAAC;AAChF,wBAAgB,YAAY,CAAC,eAAe,EAAE,eAAe,EAAE,GAAG,eAAe,EAAE,CAAC;AACpF,wBAAgB,YAAY,CAAC,eAAe,EAAE,eAAe,GAAG,eAAe,EAAE,GAAG,eAAe,GAAG,eAAe,EAAE,CAAC"}
|
||||
23
Frontend-Learner/node_modules/diff/libesm/patch/reverse.js
generated
vendored
Normal file
23
Frontend-Learner/node_modules/diff/libesm/patch/reverse.js
generated
vendored
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
/**
 * Produces a patch that undoes `structuredPatch`: file names/headers are
 * swapped, hunk old/new coordinates are swapped, and added lines become
 * removed lines (and vice versa). Accepts a single patch or an array of
 * patches; an array is reversed element-wise AND in order, since undoing a
 * sequence of patches must proceed from last to first.
 */
export function reversePatch(structuredPatch) {
    if (Array.isArray(structuredPatch)) {
        // (See comment in unixToWin for why we need the pointless-looking anonymous function here)
        return structuredPatch.map(patch => reversePatch(patch)).reverse();
    }
    // Flip the direction of a single hunk line; context lines pass through.
    const flipLine = (line) => {
        if (line.startsWith('-')) {
            return `+${line.slice(1)}`;
        }
        if (line.startsWith('+')) {
            return `-${line.slice(1)}`;
        }
        return line;
    };
    // Swap a hunk's old/new coordinates and flip each of its lines.
    const reverseHunk = (hunk) => ({
        oldLines: hunk.newLines,
        oldStart: hunk.newStart,
        newLines: hunk.oldLines,
        newStart: hunk.oldStart,
        lines: hunk.lines.map(flipLine)
    });
    return {
        ...structuredPatch,
        oldFileName: structuredPatch.newFileName,
        oldHeader: structuredPatch.newHeader,
        newFileName: structuredPatch.oldFileName,
        newHeader: structuredPatch.oldHeader,
        hunks: structuredPatch.hunks.map(reverseHunk)
    };
}
|
||||
213
Frontend-Learner/node_modules/diff/libesm/types.d.ts
generated
vendored
Normal file
213
Frontend-Learner/node_modules/diff/libesm/types.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,213 @@
|
|||
export interface ChangeObject<ValueT> {
|
||||
/**
|
||||
* The concatenated content of all the tokens represented by this change object - i.e. generally the text that is either added, deleted, or common, as a single string.
|
||||
* In cases where tokens are considered common but are non-identical (e.g. because an option like `ignoreCase` or a custom `comparator` was used), the value from the *new* string will be provided here.
|
||||
*/
|
||||
value: ValueT;
|
||||
/**
|
||||
* true if the value was inserted into the new string, otherwise false
|
||||
*/
|
||||
added: boolean;
|
||||
/**
|
||||
* true if the value was removed from the old string, otherwise false
|
||||
*/
|
||||
removed: boolean;
|
||||
/**
|
||||
* How many tokens (e.g. chars for `diffChars`, lines for `diffLines`) the value in the change object consists of
|
||||
*/
|
||||
count: number;
|
||||
}
|
||||
export type Change = ChangeObject<string>;
|
||||
export type ArrayChange = ChangeObject<any[]>;
|
||||
export interface CommonDiffOptions {
|
||||
/**
|
||||
* If `true`, the array of change objects returned will contain one change object per token (e.g. one per line if calling `diffLines`), instead of runs of consecutive tokens that are all added / all removed / all conserved being combined into a single change object.
|
||||
*/
|
||||
oneChangePerToken?: boolean;
|
||||
}
|
||||
export interface TimeoutOption {
|
||||
/**
|
||||
* A number of milliseconds after which the diffing algorithm will abort and return `undefined`.
|
||||
* Supported by the same functions as `maxEditLength`.
|
||||
*/
|
||||
timeout: number;
|
||||
}
|
||||
export interface MaxEditLengthOption {
|
||||
/**
|
||||
* A number specifying the maximum edit distance to consider between the old and new texts.
|
||||
* You can use this to limit the computational cost of diffing large, very different texts by giving up early if the cost will be huge.
|
||||
* This option can be passed either to diffing functions (`diffLines`, `diffChars`, etc) or to patch-creation function (`structuredPatch`, `createPatch`, etc), all of which will indicate that the max edit length was reached by returning `undefined` instead of whatever they'd normally return.
|
||||
*/
|
||||
maxEditLength: number;
|
||||
}
|
||||
export type AbortableDiffOptions = TimeoutOption | MaxEditLengthOption;
|
||||
export type DiffCallbackNonabortable<T> = (result: ChangeObject<T>[]) => void;
|
||||
export type DiffCallbackAbortable<T> = (result: ChangeObject<T>[] | undefined) => void;
|
||||
export interface CallbackOptionNonabortable<T> {
|
||||
/**
|
||||
* If provided, the diff will be computed in async mode to avoid blocking the event loop while the diff is calculated.
|
||||
* The value of the `callback` option should be a function and will be passed the computed diff or patch as its first argument.
|
||||
*/
|
||||
callback: DiffCallbackNonabortable<T>;
|
||||
}
|
||||
export interface CallbackOptionAbortable<T> {
|
||||
/**
|
||||
* If provided, the diff will be computed in async mode to avoid blocking the event loop while the diff is calculated.
|
||||
* The value of the `callback` option should be a function and will be passed the computed diff or patch as its first argument.
|
||||
*/
|
||||
callback: DiffCallbackAbortable<T>;
|
||||
}
|
||||
interface DiffArraysOptions<T> extends CommonDiffOptions {
|
||||
comparator?: (a: T, b: T) => boolean;
|
||||
}
|
||||
export interface DiffArraysOptionsNonabortable<T> extends DiffArraysOptions<T> {
|
||||
/**
|
||||
* If provided, the diff will be computed in async mode to avoid blocking the event loop while the diff is calculated.
|
||||
* The value of the `callback` option should be a function and will be passed the computed diff or patch as its first argument.
|
||||
*/
|
||||
callback?: DiffCallbackNonabortable<T[]>;
|
||||
}
|
||||
export type DiffArraysOptionsAbortable<T> = DiffArraysOptions<T> & AbortableDiffOptions & Partial<CallbackOptionAbortable<T[]>>;
|
||||
interface DiffCharsOptions extends CommonDiffOptions {
|
||||
/**
|
||||
* If `true`, the uppercase and lowercase forms of a character are considered equal.
|
||||
* @default false
|
||||
*/
|
||||
ignoreCase?: boolean;
|
||||
}
|
||||
export interface DiffCharsOptionsNonabortable extends DiffCharsOptions {
|
||||
/**
|
||||
* If provided, the diff will be computed in async mode to avoid blocking the event loop while the diff is calculated.
|
||||
* The value of the `callback` option should be a function and will be passed the computed diff or patch as its first argument.
|
||||
*/
|
||||
callback?: DiffCallbackNonabortable<string>;
|
||||
}
|
||||
export type DiffCharsOptionsAbortable = DiffCharsOptions & AbortableDiffOptions & Partial<CallbackOptionAbortable<string>>;
|
||||
interface DiffLinesOptions extends CommonDiffOptions {
|
||||
/**
|
||||
* `true` to remove all trailing CR (`\r`) characters before performing the diff.
|
||||
* This helps to get a useful diff when diffing UNIX text files against Windows text files.
|
||||
* @default false
|
||||
*/
|
||||
stripTrailingCr?: boolean;
|
||||
/**
|
||||
* `true` to treat the newline character at the end of each line as its own token.
|
||||
* This allows for changes to the newline structure to occur independently of the line content and to be treated as such.
|
||||
* In general this is the more human friendly form of `diffLines`; the default behavior with this option turned off is better suited for patches and other computer friendly output.
|
||||
*
|
||||
* Note that while using `ignoreWhitespace` in combination with `newlineIsToken` is not an error, results may not be as expected.
|
||||
* With `ignoreWhitespace: true` and `newlineIsToken: false`, changing a completely empty line to contain some spaces is treated as a non-change, but with `ignoreWhitespace: true` and `newlineIsToken: true`, it is treated as an insertion.
|
||||
* This is because the content of a completely blank line is not a token at all in `newlineIsToken` mode.
|
||||
*
|
||||
* @default false
|
||||
*/
|
||||
newlineIsToken?: boolean;
|
||||
/**
|
||||
* `true` to ignore a missing newline character at the end of the last line when comparing it to other lines.
|
||||
* (By default, the line `'b\n'` in text `'a\nb\nc'` is not considered equal to the line `'b'` in text `'a\nb'`; this option makes them be considered equal.)
|
||||
* Ignored if `ignoreWhitespace` or `newlineIsToken` are also true.
|
||||
* @default false
|
||||
*/
|
||||
ignoreNewlineAtEof?: boolean;
|
||||
/**
|
||||
* `true` to ignore leading and trailing whitespace characters when checking if two lines are equal.
|
||||
* @default false
|
||||
*/
|
||||
ignoreWhitespace?: boolean;
|
||||
}
|
||||
export interface DiffLinesOptionsNonabortable extends DiffLinesOptions {
|
||||
/**
|
||||
* If provided, the diff will be computed in async mode to avoid blocking the event loop while the diff is calculated.
|
||||
* The value of the `callback` option should be a function and will be passed the computed diff or patch as its first argument.
|
||||
*/
|
||||
callback?: DiffCallbackNonabortable<string>;
|
||||
}
|
||||
export type DiffLinesOptionsAbortable = DiffLinesOptions & AbortableDiffOptions & Partial<CallbackOptionAbortable<string>>;
|
||||
interface DiffWordsOptions extends CommonDiffOptions {
|
||||
/**
|
||||
* Same as in `diffChars`.
|
||||
* @default false
|
||||
*/
|
||||
ignoreCase?: boolean;
|
||||
/**
|
||||
* An optional [`Intl.Segmenter`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Intl/Segmenter) object (which must have a `granularity` of `'word'`) for `diffWords` to use to split the text into words.
|
||||
*
|
||||
* Note that this is (deliberately) incorrectly typed as `any` to avoid users whose `lib` & `target` settings in tsconfig.json are older than es2022 getting type errors when they build about `Intl.Segmenter` not existing.
|
||||
* This is kind of ugly, since it makes the type declarations worse for users who genuinely use this feature, but seemed worth it to avoid the majority of the library's users (who probably do not use this particular option) getting confusing errors and being forced to change their `lib` to es2022 (even if their own code doesn't use any es2022 functions).
|
||||
*
|
||||
* By default, `diffWords` does not use an `Intl.Segmenter`, just some regexes for splitting text into words. This will tend to give worse results than `Intl.Segmenter` would, but ensures the results are consistent across environments; `Intl.Segmenter` behaviour is only loosely specced and the implementations in browsers could in principle change dramatically in future. If you want to use `diffWords` with an `Intl.Segmenter` but ensure it behaves the same whatever environment you run it in, use an `Intl.Segmenter` polyfill instead of the JavaScript engine's native `Intl.Segmenter` implementation.
|
||||
*
|
||||
* Using an `Intl.Segmenter` should allow better word-level diffing of non-English text than the default behaviour. For instance, `Intl.Segmenter`s can generally identify via built-in dictionaries which sequences of adjacent Chinese characters form words, allowing word-level diffing of Chinese. By specifying a language when instantiating the segmenter (e.g. `new Intl.Segmenter('sv', {granularity: 'word'})`) you can also support language-specific rules, like treating Swedish's colon separated contractions (like *k:a* for *kyrka*) as single words; by default this would be seen as two words separated by a colon.
|
||||
*/
|
||||
intlSegmenter?: any;
|
||||
}
|
||||
export interface DiffWordsOptionsNonabortable extends DiffWordsOptions {
|
||||
/**
|
||||
* If provided, the diff will be computed in async mode to avoid blocking the event loop while the diff is calculated.
|
||||
* The value of the `callback` option should be a function and will be passed the computed diff or patch as its first argument.
|
||||
*/
|
||||
callback?: DiffCallbackNonabortable<string>;
|
||||
}
|
||||
export type DiffWordsOptionsAbortable = DiffWordsOptions & AbortableDiffOptions & Partial<CallbackOptionAbortable<string>>;
|
||||
interface DiffSentencesOptions extends CommonDiffOptions {
|
||||
}
|
||||
export interface DiffSentencesOptionsNonabortable extends DiffSentencesOptions {
|
||||
/**
|
||||
* If provided, the diff will be computed in async mode to avoid blocking the event loop while the diff is calculated.
|
||||
* The value of the `callback` option should be a function and will be passed the computed diff or patch as its first argument.
|
||||
*/
|
||||
callback?: DiffCallbackNonabortable<string>;
|
||||
}
|
||||
export type DiffSentencesOptionsAbortable = DiffSentencesOptions & AbortableDiffOptions & Partial<CallbackOptionAbortable<string>>;
|
||||
interface DiffJsonOptions extends CommonDiffOptions {
|
||||
/**
|
||||
* A value to replace `undefined` with. Ignored if a `stringifyReplacer` is provided.
|
||||
*/
|
||||
undefinedReplacement?: any;
|
||||
/**
|
||||
* A custom replacer function.
|
||||
* Operates similarly to the `replacer` parameter to [`JSON.stringify()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#the_replacer_parameter), but must be a function.
|
||||
*/
|
||||
stringifyReplacer?: (k: string, v: any) => any;
|
||||
}
|
||||
export interface DiffJsonOptionsNonabortable extends DiffJsonOptions {
|
||||
/**
|
||||
* If provided, the diff will be computed in async mode to avoid blocking the event loop while the diff is calculated.
|
||||
* The value of the `callback` option should be a function and will be passed the computed diff or patch as its first argument.
|
||||
*/
|
||||
callback?: DiffCallbackNonabortable<string>;
|
||||
}
|
||||
export type DiffJsonOptionsAbortable = DiffJsonOptions & AbortableDiffOptions & Partial<CallbackOptionAbortable<string>>;
|
||||
interface DiffCssOptions extends CommonDiffOptions {
|
||||
}
|
||||
export interface DiffCssOptionsNonabortable extends DiffCssOptions {
|
||||
/**
|
||||
* If provided, the diff will be computed in async mode to avoid blocking the event loop while the diff is calculated.
|
||||
* The value of the `callback` option should be a function and will be passed the computed diff or patch as its first argument.
|
||||
*/
|
||||
callback?: DiffCallbackNonabortable<string>;
|
||||
}
|
||||
export type DiffCssOptionsAbortable = DiffJsonOptions & AbortableDiffOptions & Partial<CallbackOptionAbortable<string>>;
|
||||
/**
|
||||
* Note that this contains the union of ALL options accepted by any of the built-in diffing
|
||||
* functions. The README notes which options are usable which functions. Using an option with a
|
||||
* diffing function that doesn't support it might yield unreasonable results.
|
||||
*/
|
||||
export type AllDiffOptions = DiffArraysOptions<unknown> & DiffCharsOptions & DiffWordsOptions & DiffLinesOptions & DiffJsonOptions;
|
||||
export interface StructuredPatch {
|
||||
oldFileName: string;
|
||||
newFileName: string;
|
||||
oldHeader: string | undefined;
|
||||
newHeader: string | undefined;
|
||||
hunks: StructuredPatchHunk[];
|
||||
index?: string;
|
||||
}
|
||||
export interface StructuredPatchHunk {
|
||||
oldStart: number;
|
||||
oldLines: number;
|
||||
newStart: number;
|
||||
newLines: number;
|
||||
lines: string[];
|
||||
}
|
||||
export {};
|
||||
//# sourceMappingURL=types.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/types.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/types.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA,MAAM,WAAW,YAAY,CAAC,MAAM;IAClC;;;OAGG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,OAAO,CAAC;IACf;;OAEG;IACH,OAAO,EAAE,OAAO,CAAC;IACjB;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;CACf;AAMD,MAAM,MAAM,MAAM,GAAG,YAAY,CAAC,MAAM,CAAC,CAAC;AAC1C,MAAM,MAAM,WAAW,GAAG,YAAY,CAAC,GAAG,EAAE,CAAC,CAAC;AAE9C,MAAM,WAAW,iBAAiB;IAChC;;OAEG;IACH,iBAAiB,CAAC,EAAE,OAAO,CAAC;CAC7B;AAED,MAAM,WAAW,aAAa;IAC5B;;;OAGG;IACH,OAAO,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,WAAW,mBAAmB;IAClC;;;;OAIG;IACH,aAAa,EAAE,MAAM,CAAC;CACvB;AAED,MAAM,MAAM,oBAAoB,GAAG,aAAa,GAAG,mBAAmB,CAAC;AAEvE,MAAM,MAAM,wBAAwB,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE,YAAY,CAAC,CAAC,CAAC,EAAE,KAAK,IAAI,CAAC;AAC9E,MAAM,MAAM,qBAAqB,CAAC,CAAC,IAAI,CAAC,MAAM,EAAE,YAAY,CAAC,CAAC,CAAC,EAAE,GAAG,SAAS,KAAK,IAAI,CAAC;AAEvF,MAAM,WAAW,0BAA0B,CAAC,CAAC;IAC3C;;;OAGG;IACH,QAAQ,EAAE,wBAAwB,CAAC,CAAC,CAAC,CAAA;CACtC;AACD,MAAM,WAAW,uBAAuB,CAAC,CAAC;IACxC;;;OAGG;IACH,QAAQ,EAAE,qBAAqB,CAAC,CAAC,CAAC,CAAA;CACnC;AAED,UAAU,iBAAiB,CAAC,CAAC,CAAE,SAAQ,iBAAiB;IACtD,UAAU,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,KAAK,OAAO,CAAC;CACtC;AACD,MAAM,WAAW,6BAA6B,CAAC,CAAC,CAAE,SAAQ,iBAAiB,CAAC,CAAC,CAAC;IAC5E;;;OAGG;IACH,QAAQ,CAAC,EAAE,wBAAwB,CAAC,CAAC,EAAE,CAAC,CAAA;CACzC;AACD,MAAM,MAAM,0BAA0B,CAAC,CAAC,IAAI,iBAAiB,CAAC,CAAC,CAAC,GAAG,oBAAoB,GAAG,OAAO,CAAC,uBAAuB,CAAC,CAAC,EAAE,CAAC,CAAC,CAAA;AAG/H,UAAU,gBAAiB,SAAQ,iBAAiB;IAClD;;;OAGG;IACH,UAAU,CAAC,EAAE,OAAO,CAAC;CACtB;AACD,MAAM,WAAW,4BAA6B,SAAQ,gBAAgB;IACpE;;;OAGG;IACH,QAAQ,CAAC,EAAE,wBAAwB,CAAC,MAAM,CAAC,CAAA;CAC5C;AACD,MAAM,MAAM,yBAAyB,GAAG,gBAAgB,GAAG,oBAAoB,GAAG,OAAO,CAAC,uBAAuB,CAAC,MAAM,CAAC,CAAC,CAAA;AAE1H,UAAU,gBAAiB,SAAQ,iBAAiB;IAClD;;;;OAIG;IACH,eAAe,CAAC,EAAE,OAAO,CAAC;IAC1B;;;;;;;;;;OAUG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB;;;;;OAKG;IACH,kBAAkB,CAAC,EAAE,OAAO,CAAC;IAC7B;;;OAGG;IACH,gBAAgB,CAAC,EAAE,OAAO,CAAC;CAC5B;AACD,MAAM,WAAW,4BAA6B,SAAQ,gBAAgB;IACpE;;;OAGG;IACH,QAAQ,CAAC,EAAE,wBAAwB,CAAC,MAAM,CAAC,CAAA;CA
C5C;AACD,MAAM,MAAM,yBAAyB,GAAG,gBAAgB,GAAG,oBAAoB,GAAG,OAAO,CAAC,uBAAuB,CAAC,MAAM,CAAC,CAAC,CAAA;AAG1H,UAAU,gBAAiB,SAAQ,iBAAiB;IAClD;;;OAGG;IACH,UAAU,CAAC,EAAE,OAAO,CAAA;IAEpB;;;;;;;;;OASG;IACH,aAAa,CAAC,EAAE,GAAG,CAAC;CACrB;AACD,MAAM,WAAW,4BAA6B,SAAQ,gBAAgB;IACpE;;;OAGG;IACH,QAAQ,CAAC,EAAE,wBAAwB,CAAC,MAAM,CAAC,CAAA;CAC5C;AACD,MAAM,MAAM,yBAAyB,GAAG,gBAAgB,GAAG,oBAAoB,GAAG,OAAO,CAAC,uBAAuB,CAAC,MAAM,CAAC,CAAC,CAAA;AAG1H,UAAU,oBAAqB,SAAQ,iBAAiB;CAAG;AAC3D,MAAM,WAAW,gCAAiC,SAAQ,oBAAoB;IAC5E;;;OAGG;IACH,QAAQ,CAAC,EAAE,wBAAwB,CAAC,MAAM,CAAC,CAAA;CAC5C;AACD,MAAM,MAAM,6BAA6B,GAAG,oBAAoB,GAAG,oBAAoB,GAAG,OAAO,CAAC,uBAAuB,CAAC,MAAM,CAAC,CAAC,CAAA;AAGlI,UAAU,eAAgB,SAAQ,iBAAiB;IACjD;;OAEG;IACH,oBAAoB,CAAC,EAAE,GAAG,CAAC;IAC3B;;;OAGG;IACH,iBAAiB,CAAC,EAAE,CAAC,CAAC,EAAE,MAAM,EAAE,CAAC,EAAE,GAAG,KAAK,GAAG,CAAC;CAChD;AACD,MAAM,WAAW,2BAA4B,SAAQ,eAAe;IAClE;;;OAGG;IACH,QAAQ,CAAC,EAAE,wBAAwB,CAAC,MAAM,CAAC,CAAA;CAC5C;AACD,MAAM,MAAM,wBAAwB,GAAG,eAAe,GAAG,oBAAoB,GAAG,OAAO,CAAC,uBAAuB,CAAC,MAAM,CAAC,CAAC,CAAA;AAGxH,UAAU,cAAe,SAAQ,iBAAiB;CAAG;AACrD,MAAM,WAAW,0BAA2B,SAAQ,cAAc;IAChE;;;OAGG;IACH,QAAQ,CAAC,EAAE,wBAAwB,CAAC,MAAM,CAAC,CAAA;CAC5C;AACD,MAAM,MAAM,uBAAuB,GAAG,eAAe,GAAG,oBAAoB,GAAG,OAAO,CAAC,uBAAuB,CAAC,MAAM,CAAC,CAAC,CAAA;AAGvH;;;;GAIG;AACH,MAAM,MAAM,cAAc,GACxB,iBAAiB,CAAC,OAAO,CAAC,GAC1B,gBAAgB,GAChB,gBAAgB,GAChB,gBAAgB,GAChB,eAAe,CAAC;AAElB,MAAM,WAAW,eAAe;IAC9B,WAAW,EAAE,MAAM,CAAC;IACpB,WAAW,EAAE,MAAM,CAAC;IACpB,SAAS,EAAE,MAAM,GAAG,SAAS,CAAC;IAC9B,SAAS,EAAE,MAAM,GAAG,SAAS,CAAC;IAC9B,KAAK,EAAE,mBAAmB,EAAE,CAAC;IAC7B,KAAK,CAAC,EAAE,MAAM,CAAC;CAChB;AAED,MAAM,WAAW,mBAAmB;IAClC,QAAQ,EAAE,MAAM,CAAC;IACjB,QAAQ,EAAE,MAAM,CAAC;IACjB,QAAQ,EAAE,MAAM,CAAC;IACjB,QAAQ,EAAE,MAAM,CAAC;IACjB,KAAK,EAAE,MAAM,EAAE,CAAC;CACjB"}
|
||||
1
Frontend-Learner/node_modules/diff/libesm/types.js
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/types.js
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
export {};
|
||||
3
Frontend-Learner/node_modules/diff/libesm/util/array.d.ts
generated
vendored
Normal file
3
Frontend-Learner/node_modules/diff/libesm/util/array.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
export declare function arrayEqual(a: any[], b: any[]): boolean;
|
||||
export declare function arrayStartsWith(array: any[], start: any[]): boolean;
|
||||
//# sourceMappingURL=array.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/util/array.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/util/array.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"array.d.ts","sourceRoot":"","sources":["../../src/util/array.ts"],"names":[],"mappings":"AAAA,wBAAgB,UAAU,CAAC,CAAC,EAAE,GAAG,EAAE,EAAE,CAAC,EAAE,GAAG,EAAE,GAAG,OAAO,CAMtD;AAED,wBAAgB,eAAe,CAAC,KAAK,EAAE,GAAG,EAAE,EAAE,KAAK,EAAE,GAAG,EAAE,GAAG,OAAO,CAYnE"}
|
||||
17
Frontend-Learner/node_modules/diff/libesm/util/array.js
generated
vendored
Normal file
17
Frontend-Learner/node_modules/diff/libesm/util/array.js
generated
vendored
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
/**
 * Returns true if `a` and `b` have the same length and element-wise
 * identical (===) contents.
 */
export function arrayEqual(a, b) {
    // Equal arrays are exactly those of equal length where one is a
    // (complete) prefix of the other.
    return a.length === b.length && arrayStartsWith(a, b);
}
/**
 * Returns true if the first `start.length` elements of `array` are
 * identical (===) to the elements of `start`.
 */
export function arrayStartsWith(array, start) {
    // A longer array can never be a prefix of a shorter one.
    if (start.length > array.length) {
        return false;
    }
    return start.every((item, i) => item === array[i]);
}
|
||||
2
Frontend-Learner/node_modules/diff/libesm/util/distance-iterator.d.ts
generated
vendored
Normal file
2
Frontend-Learner/node_modules/diff/libesm/util/distance-iterator.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
export default function (start: number, minLine: number, maxLine: number): () => number | undefined;
|
||||
//# sourceMappingURL=distance-iterator.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/util/distance-iterator.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/util/distance-iterator.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"distance-iterator.d.ts","sourceRoot":"","sources":["../../src/util/distance-iterator.ts"],"names":[],"mappings":"AAGA,MAAM,CAAC,OAAO,WAAU,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,GAAG,MAAM,MAAM,GAAG,SAAS,CA0CjG"}
|
||||
37
Frontend-Learner/node_modules/diff/libesm/util/distance-iterator.js
generated
vendored
Normal file
37
Frontend-Learner/node_modules/diff/libesm/util/distance-iterator.js
generated
vendored
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
// Iterator that traverses in the range of [min, max], stepping
// by distance from a given start position. I.e. for [0, 4], with
// start of 2, this will iterate 2, 3, 1, 4, 0.
//
// NOTE(review): presumably used to try candidate positions for placing a
// hunk, nearest-first — confirm against the patch-application code.
export default function (start, minLine, maxLine) {
    // wantForward alternates between calls so offsets are tried in the order
    // +1, -1, +2, -2, ... localOffset is the current distance from `start`;
    // the *Exhausted flags record that a direction has hit its bound and
    // should no longer be tried.
    let wantForward = true, backwardExhausted = false, forwardExhausted = false, localOffset = 1;
    return function iterator() {
        if (wantForward && !forwardExhausted) {
            if (backwardExhausted) {
                // Backward is done, so forward advances alone: the offset is
                // bumped here instead of by the backward branch below.
                localOffset++;
            }
            else {
                // Try the backward direction at this same offset next call.
                wantForward = false;
            }
            // Check if trying to fit beyond text length, and if not, check it fits
            // after offset location (or desired location on first iteration)
            if (start + localOffset <= maxLine) {
                return start + localOffset;
            }
            forwardExhausted = true;
        }
        if (!backwardExhausted) {
            if (!forwardExhausted) {
                wantForward = true;
            }
            // Check if trying to fit before text beginning, and if not, check it fits
            // before offset location
            if (minLine <= start - localOffset) {
                // Post-increment: return this offset, then move one further out.
                return start - localOffset++;
            }
            backwardExhausted = true;
            // Backward just ran out; retry immediately so a still-valid forward
            // candidate (if any) is returned instead of skipping a call.
            return iterator();
        }
        // We tried to fit hunk before text beginning and beyond text length, then
        // hunk can't fit on the text. Return undefined
        return undefined;
    };
}
|
||||
4
Frontend-Learner/node_modules/diff/libesm/util/params.d.ts
generated
vendored
Normal file
4
Frontend-Learner/node_modules/diff/libesm/util/params.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
export declare function generateOptions(options: {
|
||||
[key: string]: any;
|
||||
} | ((_: unknown) => void), defaults: any): object;
|
||||
//# sourceMappingURL=params.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/util/params.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/util/params.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"params.d.ts","sourceRoot":"","sources":["../../src/util/params.ts"],"names":[],"mappings":"AAAA,wBAAgB,eAAe,CAC7B,OAAO,EAAE;IAAC,CAAC,GAAG,EAAE,MAAM,GAAG,GAAG,CAAA;CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,OAAO,KAAK,IAAI,CAAC,EACtD,QAAQ,EAAE,GAAG,GACZ,MAAM,CAYR"}
|
||||
14
Frontend-Learner/node_modules/diff/libesm/util/params.js
generated
vendored
Normal file
14
Frontend-Learner/node_modules/diff/libesm/util/params.js
generated
vendored
Normal file
|
|
@ -0,0 +1,14 @@
|
|||
/**
 * Merges user-supplied `options` into `defaults` (mutating and returning
 * `defaults`). Supports the legacy calling convention where `options` may be
 * the callback function itself, in which case it is stored as
 * `defaults.callback`.
 */
export function generateOptions(options, defaults) {
    if (typeof options === 'function') {
        defaults.callback = options;
        return defaults;
    }
    if (options) {
        // Own enumerable properties only, mirroring a hasOwnProperty-guarded
        // for..in.
        for (const name of Object.keys(options)) {
            defaults[name] = options[name];
        }
    }
    return defaults;
}
|
||||
18
Frontend-Learner/node_modules/diff/libesm/util/string.d.ts
generated
vendored
Normal file
18
Frontend-Learner/node_modules/diff/libesm/util/string.d.ts
generated
vendored
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
export declare function longestCommonPrefix(str1: string, str2: string): string;
|
||||
export declare function longestCommonSuffix(str1: string, str2: string): string;
|
||||
export declare function replacePrefix(string: string, oldPrefix: string, newPrefix: string): string;
|
||||
export declare function replaceSuffix(string: string, oldSuffix: string, newSuffix: string): string;
|
||||
export declare function removePrefix(string: string, oldPrefix: string): string;
|
||||
export declare function removeSuffix(string: string, oldSuffix: string): string;
|
||||
export declare function maximumOverlap(string1: string, string2: string): string;
|
||||
/**
|
||||
* Returns true if the string consistently uses Windows line endings.
|
||||
*/
|
||||
export declare function hasOnlyWinLineEndings(string: string): boolean;
|
||||
/**
|
||||
* Returns true if the string consistently uses Unix line endings.
|
||||
*/
|
||||
export declare function hasOnlyUnixLineEndings(string: string): boolean;
|
||||
export declare function trailingWs(string: string): string;
|
||||
export declare function leadingWs(string: string): string;
|
||||
//# sourceMappingURL=string.d.ts.map
|
||||
1
Frontend-Learner/node_modules/diff/libesm/util/string.d.ts.map
generated
vendored
Normal file
1
Frontend-Learner/node_modules/diff/libesm/util/string.d.ts.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"string.d.ts","sourceRoot":"","sources":["../../src/util/string.ts"],"names":[],"mappings":"AAAA,wBAAgB,mBAAmB,CAAC,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,MAAM,GAAG,MAAM,CAQtE;AAED,wBAAgB,mBAAmB,CAAC,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,MAAM,GAAG,MAAM,CAgBtE;AAED,wBAAgB,aAAa,CAAC,MAAM,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,MAAM,CAK1F;AAED,wBAAgB,aAAa,CAAC,MAAM,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,MAAM,CAS1F;AAED,wBAAgB,YAAY,CAAC,MAAM,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,MAAM,CAEtE;AAED,wBAAgB,YAAY,CAAC,MAAM,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,MAAM,CAEtE;AAED,wBAAgB,cAAc,CAAC,OAAO,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,GAAG,MAAM,CAEvE;AAkCD;;GAEG;AACH,wBAAgB,qBAAqB,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAE7D;AAED;;GAEG;AACH,wBAAgB,sBAAsB,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAE9D;AAED,wBAAgB,UAAU,CAAC,MAAM,EAAE,MAAM,GAAG,MAAM,CAmBjD;AAED,wBAAgB,SAAS,CAAC,MAAM,EAAE,MAAM,GAAG,MAAM,CAIhD"}
|
||||
128
Frontend-Learner/node_modules/diff/libesm/util/string.js
generated
vendored
Normal file
128
Frontend-Learner/node_modules/diff/libesm/util/string.js
generated
vendored
Normal file
|
|
@ -0,0 +1,128 @@
|
|||
/**
 * Returns the longest string that both `str1` and `str2` start with.
 */
export function longestCommonPrefix(str1, str2) {
    // Walk both strings in lockstep until they diverge or the shorter one
    // is exhausted.
    const limit = Math.min(str1.length, str2.length);
    let end = 0;
    while (end < limit && str1[end] == str2[end]) {
        end++;
    }
    return str1.slice(0, end);
}
|
||||
/**
 * Returns the longest string that both `str1` and `str2` end with.
 */
export function longestCommonSuffix(str1, str2) {
    // Unlike longestCommonPrefix, every "answer is empty" scenario must be
    // handled up front, because str1.slice(-0) would return the entire
    // string rather than ''.
    if (!str1 || !str2 || str1[str1.length - 1] != str2[str2.length - 1]) {
        return '';
    }
    const limit = Math.min(str1.length, str2.length);
    let matched = 0;
    while (matched < limit &&
        str1[str1.length - 1 - matched] == str2[str2.length - 1 - matched]) {
        matched++;
    }
    // The guard above guarantees matched >= 1, so slice(-matched) is safe.
    return str1.slice(-matched);
}
|
||||
/**
 * Returns `string` with its leading `oldPrefix` swapped for `newPrefix`.
 * Throws if `string` does not actually start with `oldPrefix`, since that
 * indicates a bug in the caller.
 */
export function replacePrefix(string, oldPrefix, newPrefix) {
    const actualPrefix = string.slice(0, oldPrefix.length);
    if (actualPrefix != oldPrefix) {
        throw Error(`string ${JSON.stringify(string)} doesn't start with prefix ${JSON.stringify(oldPrefix)}; this is a bug`);
    }
    return newPrefix + string.slice(oldPrefix.length);
}
|
||||
/**
 * Returns `string` with its trailing `oldSuffix` swapped for `newSuffix`.
 * Throws if `string` does not actually end with `oldSuffix`, since that
 * indicates a bug in the caller.
 */
export function replaceSuffix(string, oldSuffix, newSuffix) {
    // An empty oldSuffix needs special handling because slice(-0) returns
    // the whole string instead of ''.
    if (!oldSuffix) {
        return string + newSuffix;
    }
    const actualSuffix = string.slice(-oldSuffix.length);
    if (actualSuffix != oldSuffix) {
        throw Error(`string ${JSON.stringify(string)} doesn't end with suffix ${JSON.stringify(oldSuffix)}; this is a bug`);
    }
    return string.slice(0, -oldSuffix.length) + newSuffix;
}
|
||||
// Strips `oldPrefix` from the start of `string`.
// Throws (via replacePrefix) if `string` doesn't actually start with
// `oldPrefix`.
export function removePrefix(string, oldPrefix) {
    return replacePrefix(string, oldPrefix, '');
}
|
||||
// Strips `oldSuffix` from the end of `string`.
// Throws (via replaceSuffix) if `string` doesn't actually end with
// `oldSuffix`.
export function removeSuffix(string, oldSuffix) {
    return replaceSuffix(string, oldSuffix, '');
}
|
||||
/**
 * Returns the longest prefix of `string2` that is also a suffix of
 * `string1` (i.e. the overlapping region when string2 is appended after
 * string1).
 */
export function maximumOverlap(string1, string2) {
    const length = overlapCount(string1, string2);
    return string2.substring(0, length);
}
|
||||
// Returns the length of the longest suffix of `a` that is also a prefix of
// `b`, using KMP-style failure links so the whole computation is linear time.
// Nicked from https://stackoverflow.com/a/60422853/1709587
function overlapCount(a, b) {
    // Deal with cases where the strings differ in length: only the last
    // b.length chars of a can overlap, and only the first a.length chars of
    // b can be matched.
    let startA = 0;
    if (a.length > b.length) {
        startA = a.length - b.length;
    }
    let endB = b.length;
    if (a.length < b.length) {
        endB = a.length;
    }
    // Create a back-reference for each index
    // that should be followed in case of a mismatch.
    // We only need B to make these references:
    const map = Array(endB);
    let k = 0; // Index that lags behind j
    map[0] = 0;
    for (let j = 1; j < endB; j++) {
        if (b[j] == b[k]) {
            map[j] = map[k]; // skip over the same character (optional optimisation)
        }
        else {
            map[j] = k;
        }
        // Follow failure links until the characters match or we hit index 0.
        while (k > 0 && b[j] != b[k]) {
            k = map[k];
        }
        if (b[j] == b[k]) {
            k++;
        }
    }
    // Phase 2: use these references while iterating over A
    k = 0;
    for (let i = startA; i < a.length; i++) {
        while (k > 0 && a[i] != b[k]) {
            k = map[k];
        }
        if (a[i] == b[k]) {
            k++;
        }
    }
    // k is how far into b the match reached when a was exhausted — i.e. the
    // length of the suffix-of-a / prefix-of-b overlap.
    return k;
}
|
||||
/**
 * Returns true if the string consistently uses Windows line endings.
 */
export function hasOnlyWinLineEndings(string) {
    // Must contain at least one CRLF...
    if (!string.includes('\r\n')) {
        return false;
    }
    // ...and no LF that lacks a preceding CR. A '\n' at position 0 has no
    // preceding character at all, so it needs its own check.
    return !string.startsWith('\n') && !/[^\r]\n/.test(string);
}
|
||||
/**
 * Returns true if the string consistently uses Unix line endings.
 */
export function hasOnlyUnixLineEndings(string) {
    // At least one LF must be present, and no CRLF anywhere.
    const hasCrlf = string.includes('\r\n');
    const hasLf = string.includes('\n');
    return hasLf && !hasCrlf;
}
|
||||
/**
 * Returns the run of whitespace (possibly empty) at the end of `string`.
 */
export function trailingWs(string) {
    // A regex like /\s*$/ would be simpler, but per
    // https://markamery.com/blog/quadratic-time-regexes/ it takes O(n²) time
    // in the worst case (a long run of NON-trailing whitespace), and the
    // negative-lookbehind fix from that post is incompatible with old Safari
    // versions (see https://github.com/kpdecker/jsdiff/pull/550). Hence this
    // explicit backwards scan.
    const ws = /\s/;
    let end = string.length;
    while (end > 0 && ws.test(string[end - 1])) {
        end--;
    }
    return string.substring(end);
}
|
||||
/**
 * Returns the run of whitespace (possibly empty) at the start of `string`.
 */
export function leadingWs(string) {
    // Thankfully the annoying considerations described in trailingWs don't
    // apply here: an anchored /^\s*/ is linear-time.
    const found = string.match(/^\s*/);
    if (found === null) {
        return '';
    }
    return found[0];
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue