'use strict';
const chai = require('chai');
const assert = chai.assert;
const parquet = require('../parquet.js');
/*
This test creates a file whose `test` column uses the annotated LIST structure that AWS Athena expects.
Currently the schema (and the input data) needs to follow the parquet specification for an annotated list.
The Athena schema for this test is `id string, test array<struct<a:string,b:int>>`,
but instead of the plain input data `{id: 'Row1', test: [{a: 'test1', b: 1}, {a: 'test2', b: 2}, {a: 'test3', b: 3}]}`
we need to wrap the array inside `list` and every element inside `element`, i.e.:
`{id: 'Row1', test: {list: [{element: {a: 'test1', b: 1}}, {element: {a: 'test2', b: 2}}, {element: {a: 'test3', b: 3}}]}}`
and the schema needs to match this structure as well (see listSchema below).
To see a working example on Athena, run this test and copy the resulting list.parquet file to an S3 bucket.
In Athena create the listTest table with the following command:
CREATE EXTERNAL TABLE `listTest`(
id string,
`test` array<struct<a:string,b:int>>
)
ROW FORMAT SERDE
'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'
STORED AS INPUTFORMAT
'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
OUTPUTFORMAT
'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
LOCATION
's3://s3bucket/.../list.parquet'
Then verify that Athena parses the parquet file correctly with `SELECT * FROM listTest`.
*/
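// A minimal sketch (not part of the upstream test) of a helper that performs
// the wrapping described above. `wrapListRow` is a hypothetical name used for
// illustration, not a parquetjs API: it takes a plain row of the form
// {id, test: [{a, b}, ...]} and nests the array under list/element.
function wrapListRow(row) {
  return {
    id: row.id,
    // each array entry becomes {element: entry}, and the mapped array is
    // nested under a single `list` key to match the annotated LIST schema
    test: {list: row.test.map((entry) => ({element: entry}))}
  };
}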
const listSchema = new parquet.ParquetSchema({
  id: {type: 'UTF8'},
test: {
type: 'LIST',
fields: {
list: {
repeated: true,
fields: {
element: {
fields: {
a: {type: 'UTF8'},
b: {type: 'INT64'}
}
}
}
}
}
}
});
describe('list', function() {
let reader;
const row1 = {
id: 'Row1',
    test: {list: [{element: {a: 'test1', b: 1n}}, {element: {a: 'test2', b: 2n}}, {element: {a: 'test3', b: 3n}}]}
};
const row2 = {
id: 'Row2',
    test: {list: [{element: {a: 'test4', b: 4n}}]}
};
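  // For reference: row1 and row2 are what wrapListRow (sketched above) would
  // produce from plain inputs such as {id: 'Row2', test: [{a: 'test4', b: 4n}]}.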
  before(async function() {
    const writer = await parquet.ParquetWriter.openFile(listSchema, 'list.parquet', {pageSize: 100});
    // appendRow is async; await each call so all rows are written before close()
    await writer.appendRow(row1);
    await writer.appendRow(row2);
    await writer.close();
    reader = await parquet.ParquetReader.openFile('list.parquet');
});
it('schema is encoded correctly', async function() {
const schema = reader.metadata.schema;
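    // the file metadata stores the schema as a flattened list of Thrift
    // SchemaElements: the root element plus id, test, list, element, a and b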
assert.equal(schema.length, 7);
assert.equal(schema[2].name, 'test');
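    // converted_type 3 is LIST in the parquet Thrift ConvertedType enum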
assert.equal(schema[2].converted_type, 3);
});
it('output matches input', async function() {
const cursor = reader.getCursor();
let row = await cursor.next();
assert.deepEqual(row, row1);
row = await cursor.next();
assert.deepEqual(row, row2);
});
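  after(async function() {
    // release the reader's file handle once all assertions have run
    // (assumes parquetjs's async ParquetReader#close())
    await reader.close();
  });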
});