
Commit 8413c70

fix: test for file under/over reads (#300)
When we read too much or too little data, it normally means that the UnixFS metadata in the root node is incorrect, so throw an error.
1 parent: 4f316ac
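
For callers of the exporter, this change surfaces as two new error codes raised while a file's content is consumed. A minimal consumer-side sketch, assuming a rootCid and blockstore are already set up (the names here are illustrative, mirroring the tests below):

import all from 'it-all'
import { exporter } from 'ipfs-unixfs-exporter'

const entry = await exporter(rootCid, blockstore)

if (entry.type === 'file') {
  try {
    // draining the content stream is what triggers the new checks
    const data = await all(entry.content())
  } catch (err: any) {
    if (err.code === 'ERR_UNDER_READ' || err.code === 'ERR_OVER_READ') {
      // the root node's UnixFS metadata disagrees with the actual leaf data
    }

    throw err
  }
}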

File tree

2 files changed: +164 −2 lines

packages/ipfs-unixfs-exporter/src/resolvers/unixfs-v1/content/file.ts

Lines changed: 14 additions & 2 deletions
@@ -137,15 +137,27 @@ const fileContent: UnixfsV1Resolver = (cid, node, unixfs, path, resolve, depth,
       return
     }
 
+    let read = 0n
     const queue = pushable()
 
     void walkDAG(blockstore, node, queue, 0n, offset, offset + length, options)
+      .then(() => {
+        const wanted = length - offset
+
+        if (read < wanted) {
+          throw errCode(new Error('Traversed entire DAG but did not read enough bytes'), 'ERR_UNDER_READ')
+        }
+
+        if (read > wanted) {
+          throw errCode(new Error('Read too many bytes - the file size reported by the UnixFS data in the root node may be incorrect'), 'ERR_OVER_READ')
+        }
+
+        queue.end()
+      })
       .catch(err => {
         queue.end(err)
       })
 
-    let read = 0n
-
     for await (const buf of queue) {
       if (buf == null) {
         continue
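
The checks deliberately run in walkDAG's .then() handler: if the byte count is off, the throw rejects the promise chain and falls through to the existing .catch(), which ends the pushable queue with the error so the consumer sees it mid-iteration. For the comparison to hold, the loop that drains the queue must keep read up to date; the hunk only shows the loop header, so the accumulation below is an assumed sketch, not the actual file.ts body:

for await (const buf of queue) {
  if (buf == null) {
    continue
  }

  // assumed: count every byte handed to the consumer so the
  // .then() handler above can compare it against the expected total
  read += BigInt(buf.byteLength)
  yield buf
}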

packages/ipfs-unixfs-exporter/test/exporter-esoteric.spec.ts

Lines changed: 150 additions & 0 deletions
@@ -211,4 +211,154 @@ describe('exporter esoteric DAGs', () => {
     const data = uint8ArrayConcat(await all(exported.content()))
     expect(data).to.deep.equal(buf)
   })
+
+  it('errors on DAG with blocksizes that are too large', async () => {
+    const leaves = await Promise.all([
+      randomBytes(5),
+      randomBytes(3),
+      randomBytes(6)
+    ].map(async buf => {
+      return {
+        cid: await storeBlock(buf, raw.code),
+        buf
+      }
+    }))
+
+    const unixfs = new UnixFS({
+      type: 'file',
+      blockSizes: [
+        BigInt(leaves[0].buf.byteLength),
+        BigInt(leaves[1].buf.byteLength + 5), // this is wrong
+        BigInt(leaves[2].buf.byteLength)
+      ]
+    })
+
+    const rootNode = {
+      Data: unixfs.marshal(),
+      Links: [{
+        Name: '',
+        Hash: leaves[0].cid,
+        Tsize: leaves[0].buf.byteLength
+      }, {
+        Name: '',
+        Hash: leaves[1].cid,
+        Tsize: leaves[1].buf.byteLength
+      }, {
+        Name: '',
+        Hash: leaves[2].cid,
+        Tsize: leaves[2].buf.byteLength
+      }]
+    }
+
+    const rootBuf = dagPb.encode(rootNode)
+    const rootCid = await storeBlock(rootBuf, dagPb.code)
+    const exported = await exporter(rootCid, block)
+
+    if (exported.type !== 'file') {
+      throw new Error('Unexpected type')
+    }
+
+    await expect(all(exported.content())).to.eventually.be.rejected
+      .with.property('code', 'ERR_UNDER_READ')
+  })
+
+  it('errors on DAG with blocksizes that are too small', async () => {
+    const leaves = await Promise.all([
+      randomBytes(5),
+      randomBytes(3),
+      randomBytes(6)
+    ].map(async buf => {
+      return {
+        cid: await storeBlock(buf, raw.code),
+        buf
+      }
+    }))
+
+    const unixfs = new UnixFS({
+      type: 'file',
+      blockSizes: [
+        BigInt(leaves[0].buf.byteLength),
+        BigInt(leaves[1].buf.byteLength - 2), // this is wrong
+        BigInt(leaves[2].buf.byteLength)
+      ]
+    })
+
+    const rootNode = {
+      Data: unixfs.marshal(),
+      Links: [{
+        Name: '',
+        Hash: leaves[0].cid,
+        Tsize: leaves[0].buf.byteLength
+      }, {
+        Name: '',
+        Hash: leaves[1].cid,
+        Tsize: leaves[1].buf.byteLength
+      }, {
+        Name: '',
+        Hash: leaves[2].cid,
+        Tsize: leaves[2].buf.byteLength
+      }]
+    }
+
+    const rootBuf = dagPb.encode(rootNode)
+    const rootCid = await storeBlock(rootBuf, dagPb.code)
+    const exported = await exporter(rootCid, block)
+
+    if (exported.type !== 'file') {
+      throw new Error('Unexpected type')
+    }
+
+    await expect(all(exported.content())).to.eventually.be.rejected
+      .with.property('code', 'ERR_OVER_READ')
+  })
+
+  it('errors on DAG with incorrect number of blocksizes', async () => {
+    const leaves = await Promise.all([
+      randomBytes(5),
+      randomBytes(3),
+      randomBytes(6)
+    ].map(async buf => {
+      return {
+        cid: await storeBlock(buf, raw.code),
+        buf
+      }
+    }))
+
+    const unixfs = new UnixFS({
+      type: 'file',
+      blockSizes: [
+        BigInt(leaves[0].buf.byteLength),
+        // BigInt(leaves[1].buf.byteLength), // this is wrong
+        BigInt(leaves[2].buf.byteLength)
+      ]
+    })
+
+    const rootNode = {
+      Data: unixfs.marshal(),
+      Links: [{
+        Name: '',
+        Hash: leaves[0].cid,
+        Tsize: leaves[0].buf.byteLength
+      }, {
+        Name: '',
+        Hash: leaves[1].cid,
+        Tsize: leaves[1].buf.byteLength
+      }, {
+        Name: '',
+        Hash: leaves[2].cid,
+        Tsize: leaves[2].buf.byteLength
+      }]
+    }
+
+    const rootBuf = dagPb.encode(rootNode)
+    const rootCid = await storeBlock(rootBuf, dagPb.code)
+    const exported = await exporter(rootCid, block)
+
+    if (exported.type !== 'file') {
+      throw new Error('Unexpected type')
+    }
+
+    await expect(all(exported.content())).to.eventually.be.rejected
+      .with.property('code', 'ERR_NOT_UNIXFS')
+  })
 })
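
These tests lean on a storeBlock helper defined earlier in the spec file, outside this hunk. A plausible sketch of what it does, assuming the multiformats API and the suite's block blockstore; the real helper may differ:

import { CID } from 'multiformats/cid'
import { sha256 } from 'multiformats/hashes/sha2'

// hypothetical reconstruction of the test helper
async function storeBlock (buf: Uint8Array, codec: number): Promise<CID> {
  const hash = await sha256.digest(buf)
  const cid = CID.createV1(codec, hash)

  // `block` is the blockstore the tests pass to exporter()
  await block.put(cid, buf)

  return cid
}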
