
Commit 80e0b50

kanishka1804 authored and kamiazya committed

style: apply auto-formatting fixes via biome

1 parent 76508ca · commit 80e0b50

5 files changed: +268 -175 lines


src/Lexer.spec.ts

Lines changed: 58 additions & 60 deletions
@@ -21,62 +21,60 @@ const LOCATION_SHAPE = {
 };

 describe("class Lexer", () => {
+  it("should tokenize record delimiters correctly for various EOL (property test)", () => {
+    fc.assert(
+      fc.property(
+        fc.gen().map((g) => {
+          const options = g(FC.commonOptions);
+          const eol = g(FC.eol);
+          const data = g(FC.csvData, {
+            fieldConstraints: { minLength: 1 },
+            rowsConstraints: { minLength: 1 },
+            columnsConstraints: { minLength: 1 },
+          });
+          const csv = data
+            .map((row) =>
+              row
+                .map((field) => escapeField(field, { ...options, quote: true }))
+                .join(options.delimiter),
+            )
+            .join(eol);

-  it("should tokenize record delimiters correctly for various EOL (property test)", () => {
-    fc.assert(
-      fc.property(
-        fc.gen().map((g) => {
-          const options = g(FC.commonOptions);
-          const eol = g(FC.eol);
-          const data = g(FC.csvData, {
-            fieldConstraints: { minLength: 1 },
-            rowsConstraints: { minLength: 1 },
-            columnsConstraints: { minLength: 1 },
-          });
-          const csv =
-            data
-              .map((row) =>
-                row
-                  .map((field) => escapeField(field, { ...options, quote: true }))
-                  .join(options.delimiter),
-              )
-              .join(eol);
-
-          const expected = [
-            ...data.flatMap((row, i) => [
-              ...row.flatMap((field, j) => [
-                { type: Field, value: field, location: LOCATION_SHAPE },
-                ...(row.length - 1 !== j
-                  ? [
-                      {
-                        type: FieldDelimiter,
-                        value: options.delimiter,
-                        location: LOCATION_SHAPE,
-                      },
-                    ]
-                  : []),
-              ]),
-              ...(data.length - 1 !== i
-                ? [
-                    {
-                      type: RecordDelimiter,
-                      value: eol,
-                      location: LOCATION_SHAPE,
-                    },
-                  ]
-                : []),
-            ]),
-          ];
-          return { csv, options, expected };
-        }),
-        ({ options, csv, expected }) => {
-          const lexer = new Lexer(options);
-          const actual = [...lexer.lex(csv)];
-          expect(actual).toMatchObject(expected);
-        },
-      ),
-    );
-  });
+          const expected = [
+            ...data.flatMap((row, i) => [
+              ...row.flatMap((field, j) => [
+                { type: Field, value: field, location: LOCATION_SHAPE },
+                ...(row.length - 1 !== j
+                  ? [
+                      {
+                        type: FieldDelimiter,
+                        value: options.delimiter,
+                        location: LOCATION_SHAPE,
+                      },
+                    ]
+                  : []),
+              ]),
+              ...(data.length - 1 !== i
+                ? [
+                    {
+                      type: RecordDelimiter,
+                      value: eol,
+                      location: LOCATION_SHAPE,
+                    },
+                  ]
+                : []),
+            ]),
+          ];
+          return { csv, options, expected };
+        }),
+        ({ options, csv, expected }) => {
+          const lexer = new Lexer(options);
+          const actual = [...lexer.lex(csv)];
+          expect(actual).toMatchObject(expected);
+        },
+      ),
+    );
+  });

   it("should lex with comma as a default field delimiter", () => {
     fc.assert(
@@ -319,11 +317,11 @@ describe("class Lexer", () => {
             ]),
           ];
           if (data.length > 0 && EOF) {
-            expected.push({
-              type: RecordDelimiter,
-              value: eol,
-              location: LOCATION_SHAPE
-            });
+            expected.push({
+              type: RecordDelimiter,
+              value: eol,
+              location: LOCATION_SHAPE,
+            });
           }
           return { csv, data, options, expected };
         }),
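
Aside: the reformatted test follows fast-check's generate-then-assert pattern, which both hunks above preserve: build the CSV input and the expected token stream from the same generated data, then check the lexer against both. A minimal standalone sketch of that pattern (a hypothetical join/split round-trip, not code from this repository; assumes fast-check plus a vitest-style it/expect as in the surrounding specs):

import fc from "fast-check";
import { expect, it } from "vitest";

// Generate fields that cannot contain the delimiter, derive both the
// input line and the expected result from the same data, then assert
// the round-trip. Same structure as the lexer property test above.
it("join then split recovers the original fields", () => {
  fc.assert(
    fc.property(
      fc
        .array(fc.string().filter((s) => !s.includes(",")), { minLength: 1 })
        .map((fields) => ({ fields, line: fields.join(",") })),
      ({ fields, line }) => {
        expect(line.split(",")).toStrictEqual(fields);
      },
    ),
  );
});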

src/Lexer.test.ts

Lines changed: 100 additions & 12 deletions
@@ -11,24 +11,112 @@ describe("Lexer", () => {
   test("should parse mixed CRLF and LF record delimiters correctly", () => {
     const csvMixed = "field1,field2\r\nfield3,field4\nfield5,field6";
     const tokens = lexer.lex(csvMixed);
-
+
     expect([...tokens]).toStrictEqual([
       // --- Row 1 (Field 1, Delimiter, Field 2) ---
-      { type: Field, value: "field1", location: { start: { line: 1, column: 1, offset: 0 }, end: { line: 1, column: 7, offset: 6 }, rowNumber: 1, } },
-      { type: FieldDelimiter, value: ",", location: { start: { line: 1, column: 7, offset: 6 }, end: { line: 1, column: 8, offset: 7 }, rowNumber: 1, } },
-      { type: Field, value: "field2", location: { start: { line: 1, column: 8, offset: 7 }, end: { line: 1, column: 14, offset: 13 }, rowNumber: 1, } },
+      {
+        type: Field,
+        value: "field1",
+        location: {
+          start: { line: 1, column: 1, offset: 0 },
+          end: { line: 1, column: 7, offset: 6 },
+          rowNumber: 1,
+        },
+      },
+      {
+        type: FieldDelimiter,
+        value: ",",
+        location: {
+          start: { line: 1, column: 7, offset: 6 },
+          end: { line: 1, column: 8, offset: 7 },
+          rowNumber: 1,
+        },
+      },
+      {
+        type: Field,
+        value: "field2",
+        location: {
+          start: { line: 1, column: 8, offset: 7 },
+          end: { line: 1, column: 14, offset: 13 },
+          rowNumber: 1,
+        },
+      },
       // --- Record Delimiter 1 (CRLF) ---
-      { type: RecordDelimiter, value: "\r\n", location: { start: { line: 1, column: 14, offset: 13 }, end: { line: 2, column: 1, offset: 15 }, rowNumber: 1, } },
+      {
+        type: RecordDelimiter,
+        value: "\r\n",
+        location: {
+          start: { line: 1, column: 14, offset: 13 },
+          end: { line: 2, column: 1, offset: 15 },
+          rowNumber: 1,
+        },
+      },
       // --- Row 2 (Field 3, Delimiter, Field 4) ---
-      { type: Field, value: "field3", location: { start: { line: 2, column: 1, offset: 15 }, end: { line: 2, column: 7, offset: 21 }, rowNumber: 2, } },
-      { type: FieldDelimiter, value: ",", location: { start: { line: 2, column: 7, offset: 21 }, end: { line: 2, column: 8, offset: 22 }, rowNumber: 2, } },
-      { type: Field, value: "field4", location: { start: { line: 2, column: 8, offset: 22 }, end: { line: 2, column: 14, offset: 28 }, rowNumber: 2, } },
+      {
+        type: Field,
+        value: "field3",
+        location: {
+          start: { line: 2, column: 1, offset: 15 },
+          end: { line: 2, column: 7, offset: 21 },
+          rowNumber: 2,
+        },
+      },
+      {
+        type: FieldDelimiter,
+        value: ",",
+        location: {
+          start: { line: 2, column: 7, offset: 21 },
+          end: { line: 2, column: 8, offset: 22 },
+          rowNumber: 2,
+        },
+      },
+      {
+        type: Field,
+        value: "field4",
+        location: {
+          start: { line: 2, column: 8, offset: 22 },
+          end: { line: 2, column: 14, offset: 28 },
+          rowNumber: 2,
+        },
+      },
       // --- Record Delimiter 2 (LF) ---
-      { type: RecordDelimiter, value: "\n", location: { start: { line: 2, column: 14, offset: 28 }, end: { line: 3, column: 1, offset: 29 }, rowNumber: 2, } },
+      {
+        type: RecordDelimiter,
+        value: "\n",
+        location: {
+          start: { line: 2, column: 14, offset: 28 },
+          end: { line: 3, column: 1, offset: 29 },
+          rowNumber: 2,
+        },
+      },
       // --- Row 3 (Field 5, Delimiter, Field 6) ---
-      { type: Field, value: "field5", location: { start: { line: 3, column: 1, offset: 29 }, end: { line: 3, column: 7, offset: 35 }, rowNumber: 3, } },
-      { type: FieldDelimiter, value: ",", location: { start: { line: 3, column: 7, offset: 35 }, end: { line: 3, column: 8, offset: 36 }, rowNumber: 3, } },
-      { type: Field, value: "field6", location: { start: { line: 3, column: 8, offset: 36 }, end: { line: 3, column: 14, offset: 42 }, rowNumber: 3, } },
+      {
+        type: Field,
+        value: "field5",
+        location: {
+          start: { line: 3, column: 1, offset: 29 },
+          end: { line: 3, column: 7, offset: 35 },
+          rowNumber: 3,
+        },
+      },
+      {
+        type: FieldDelimiter,
+        value: ",",
+        location: {
+          start: { line: 3, column: 7, offset: 35 },
+          end: { line: 3, column: 8, offset: 36 },
+          rowNumber: 3,
+        },
+      },
+      {
+        type: Field,
+        value: "field6",
+        location: {
+          start: { line: 3, column: 8, offset: 36 },
+          end: { line: 3, column: 14, offset: 42 },
+          rowNumber: 3,
+        },
+      },
     ]);
   });

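The expanded expectations make the lexer's location tracking easy to read off. A sketch of the shape these tokens follow, reconstructed from the assertions above (type and field names are inferred from the test, not taken from the library's published typings):

// Reconstructed from the test expectations above; names are inferred,
// not copied from the library's typings.
interface Position {
  line: number; // 1-based line within the input
  column: number; // 1-based column within that line
  offset: number; // 0-based character offset from the start of the input
}

interface TokenLocation {
  start: Position; // where the token begins
  end: Position; // one position past where the token ends
  rowNumber: number; // 1-based CSV record the token belongs to
}

Note how the CRLF delimiter spans two offsets (13 to 15) while LF spans one (28 to 29), and how a record delimiter carries the rowNumber of the row it terminates.
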
src/Lexer.ts

Lines changed: 16 additions & 17 deletions
@@ -93,7 +93,6 @@ export class Lexer<
    * @yields Tokens from the buffered CSV data.
    */
   *#tokens(): Generator<Token> {
-
     let token: Token | null;
     while ((token = this.#nextToken())) {
       yield token;
@@ -156,23 +155,23 @@ export class Lexer<
     }

     // Check for Delimiter
-    if (this.#buffer.startsWith(this.#delimiter)) {
+    if (this.#buffer.startsWith(this.#delimiter)) {
       // FIX: Slice the buffer by the full delimiter length
-      this.#buffer = this.#buffer.slice(this.#fieldDelimiterLength);
-      const start: Position = { ...this.#cursor };
+      this.#buffer = this.#buffer.slice(this.#fieldDelimiterLength);
+      const start: Position = { ...this.#cursor };
       // FIX: Advance the column/offset by the full delimiter length
-      this.#cursor.column += this.#fieldDelimiterLength;
-      this.#cursor.offset += this.#fieldDelimiterLength;
-      return {
-        type: FieldDelimiter,
-        value: this.#delimiter,
-        location: {
-          start,
-          end: { ...this.#cursor },
-          rowNumber: this.#rowNumber,
-        },
-      };
-    }
+      this.#cursor.column += this.#fieldDelimiterLength;
+      this.#cursor.offset += this.#fieldDelimiterLength;
+      return {
+        type: FieldDelimiter,
+        value: this.#delimiter,
+        location: {
+          start,
+          end: { ...this.#cursor },
+          rowNumber: this.#rowNumber,
+        },
+      };
+    }

     // Check for Quoted String
     if (this.#buffer.startsWith(this.#quotation)) {
@@ -277,7 +276,7 @@ export class Lexer<
     const match = this.#matcher.exec(this.#buffer);
     if (match) {
       // When buffering (not flushing) and the match fully consumes the buffer,
-      // defer emission to wait for the next chunk (field may be followed by delimiter/quote)
+      // defer emission to wait for the next chunk (field may be followed by delimiter/quote)
       if (this.#flush === false && match[0].length === this.#buffer.length) {
         return null;
       }
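
The whitespace-only changes here sit around an earlier fix flagged by the FIX comments: the buffer and cursor must advance by the full delimiter length, not by a single character. A standalone sketch of why that matters (a hypothetical helper, not the library's code, assuming a multi-character delimiter such as "||"):

// With a multi-character delimiter such as "||", consuming a single
// character would leave "|" in the buffer and desynchronize the
// column/offset bookkeeping kept by the cursor.
interface Cursor {
  line: number;
  column: number;
  offset: number;
}

function consumeDelimiter(
  buffer: string,
  delimiter: string,
  cursor: Cursor,
): string {
  if (!buffer.startsWith(delimiter)) return buffer;
  cursor.column += delimiter.length; // advance by the FULL delimiter length
  cursor.offset += delimiter.length;
  return buffer.slice(delimiter.length);
}

// consumeDelimiter("||rest", "||", cursor) leaves "rest" in the buffer,
// not "|rest".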

src/LexerTransformer.spec.ts

Lines changed: 14 additions & 14 deletions
@@ -161,20 +161,20 @@ describe("LexerTransformer", () => {
         ),
         {
           examples: [
-          [
-            // The single EOL must now be tokenized correctly by the fixed Lexer
-            {
-              options: { delimiter: ",", quotation: '"' },
-              chunks: ["\n"],
-              expected: [
-                {
-                  type: RecordDelimiter,
-                  value: "\n",
-                  location: LOCATION_SHAPE,
-                },
-              ],
-            },
-          ],
+            [
+              // The single EOL must now be tokenized correctly by the fixed Lexer
+              {
+                options: { delimiter: ",", quotation: '"' },
+                chunks: ["\n"],
+                expected: [
+                  {
+                    type: RecordDelimiter,
+                    value: "\n",
+                    location: LOCATION_SHAPE,
+                  },
+                ],
+              },
+            ],
           ],
         },
       );
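
The chunks: ["\n"] example feeds the transformer a chunk that is nothing but an EOL, which is exactly the case the deferred-emission logic in Lexer.ts has to get right at flush time. A hypothetical usage sketch, assuming LexerTransformer is a TransformStream<string, Token> as the chunk-based spec suggests (the import paths and constructor signature are assumptions, not verified against the library's public API):

import { LexerTransformer } from "./LexerTransformer"; // assumed path
import type { Token } from "./common/types"; // assumed path

// Pipe a single "\n" chunk through the transformer and collect tokens.
async function lexSingleEol(): Promise<Token[]> {
  const tokens: Token[] = [];
  await new ReadableStream<string>({
    start(controller) {
      controller.enqueue("\n"); // the single-EOL chunk from the example
      controller.close();
    },
  })
    .pipeThrough(new LexerTransformer({ delimiter: ",", quotation: '"' }))
    .pipeTo(
      new WritableStream<Token>({
        write(token) {
          tokens.push(token);
        },
      }),
    );
  return tokens; // expected: one RecordDelimiter token with value "\n"
}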
