v1.2.0: bugfix for buckets with invalid domain names, and pass request options

- Bucket names like 'example.bucket' and 'example_bucket' are valid in path-style URLs, but not as domain names.
- Allow passthrough of request options to the latest @root/request, which supports pipes and streams.
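
The first fix comes down to choosing between virtual-hosted-style and path-style URLs. Below is a minimal sketch of that rule, mirroring the `toAwsBucketHost` helper added in this commit; the `bucketUrl` wrapper and the example bucket names are illustrative, not part of the library.

```js
// Sketch only: a bucket name that is safe as a single DNS label
// (letters, digits, hyphens) can be used as a subdomain; names with
// '.' or '_' must fall back to path-style URLs.
function bucketUrl(bucket, region, key) {
    if (/^[a-z0-9-]+$/i.test(bucket)) {
        // virtual-hosted style: https://<bucket>.s3.amazonaws.com/<key>
        return 'https://' + bucket + '.s3.amazonaws.com/' + key;
    }
    // path style: https://s3-<region>.amazonaws.com/<bucket>/<key>
    var host =
        !region || 'us-east-1' === region
            ? 's3.amazonaws.com'
            : 's3-' + region + '.amazonaws.com';
    return 'https://' + host + '/' + bucket + '/' + key;
}

// 'example-bucket' => https://example-bucket.s3.amazonaws.com/data/stats.csv
// 'example.bucket' => https://s3-us-east-2.amazonaws.com/example.bucket/data/stats.csv
console.log(bucketUrl('example-bucket', 'us-east-2', 'data/stats.csv'));
console.log(bucketUrl('example.bucket', 'us-east-2', 'data/stats.csv'));
```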
AJ ONeal 2021-01-26 16:57:20 -07:00
parent ad0fa1f83b
commit efdfabb9a4
5 changed files with 163 additions and 56 deletions

View File

@@ -13,8 +13,28 @@ A lightweight alternative to the S3 SDK that uses only @root/request and aws4.

 ### Download a file from S3

+This library supports the same streaming options as [@root/request.js](https://git.rootprojects.org/root/request.js).
+
+#### as a stream
+
 ```js
-s3.get({
+var resp = await s3.get({
+    accessKeyId, // 'AKIAXXXXXXXXXXXXXXXX'
+    secretAccessKey, // 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
+    region, // 'us-east-2'
+    bucket, // 'bucket-name'
+    prefix, // 'my-prefix/' (optional)
+    key, // 'data/stats.csv' (omits prefix, if any)
+    stream // fs.createWriteStream('./path/to/file.bin')
+});
+await resp.stream;
+```
+
+#### in-memory
+
+```js
+var resp = await s3.get({
     accessKeyId, // 'AKIAXXXXXXXXXXXXXXXX'
     secretAccessKey, // 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
     region, // 'us-east-2'
@@ -22,12 +42,14 @@ s3.get({
     prefix, // 'my-prefix/' (optional)
     key // 'data/stats.csv' (omits prefix, if any)
 });
+fs.writeFile(resp.body, './path/to/file.bin');
 ```

 ### Upload a new file to S3

 ```js
-s3.set({
+await s3.set({
     accessKeyId,
     secretAccessKey,
     region,
@@ -41,6 +63,36 @@ s3.set({
 });
 ```

+### Check that a file exists
+
+```js
+var resp = await s3.head({
+    accessKeyId, // 'AKIAXXXXXXXXXXXXXXXX'
+    secretAccessKey, // 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
+    region, // 'us-east-2'
+    bucket, // 'bucket-name'
+    prefix, // 'my-prefix/' (optional)
+    key // 'data/stats.csv' (omits prefix, if any)
+});
+console.log(resp.headers);
+```
+
+### Delete file
+
+```js
+var resp = await s3.delete({
+    accessKeyId, // 'AKIAXXXXXXXXXXXXXXXX'
+    secretAccessKey, // 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
+    region, // 'us-east-2'
+    bucket, // 'bucket-name'
+    prefix, // 'my-prefix/' (optional)
+    key // 'data/stats.csv' (omits prefix, if any)
+});
+console.log(resp.headers);
+```
+
 ### Return signed URL without fetching.

 ```js
@@ -53,7 +105,7 @@ s3.sign({
     prefix,
     key
 });
-```
+````

 ### A note on S3 terminology

View File

@@ -23,32 +23,34 @@ if (!key || !filepath) {

 async function run() {
     // GET STREAMED FILE
-    await s3
-        .get({
-            accessKeyId,
-            secretAccessKey,
-            region,
-            bucket,
-            prefix,
-            key
-        })
-        .then(function (resp) {
-            console.log(resp.url);
-            return fs.promises.writeFile(filepath, resp.body);
-        })
-        .catch(function (err) {
-            console.error('Error:');
-            if (err.response) {
-                console.error(err.url);
-                console.error('GET Response:');
-                console.error(err.response.statusCode);
-                console.error(err.response.headers);
-                console.error(err.response.body.toString('utf8'));
-            } else {
-                console.error(err);
-            }
-            process.exit(1);
-        });
+    var resp = await s3.get({
+        accessKeyId,
+        secretAccessKey,
+        region,
+        bucket,
+        prefix,
+        key,
+        stream: filepath
+    });
+
+    console.log('Downloading', resp.url);
+    await resp.stream;
+
+    console.log('');
+    console.log('Saved as', filepath);
+    console.log('');
 }
-run();
+run().catch(function (err) {
+    console.error('Error:');
+    if (err.response) {
+        console.error(err.url);
+        console.error('GET Response:');
+        console.error(err.response.statusCode);
+        console.error(err.response.headers);
+        console.error(err.response.body.toString('utf8'));
+    } else {
+        console.error(err);
+    }
+    process.exit(1);
+});

View File

@@ -5,10 +5,40 @@ var request = require('@root/request');
 var env = process.env;
 var S3;

+function toAwsBucketHost(host, bucket, region) {
+    if (host) {
+        return [host];
+    }
+
+    // Handle simply if it contains only valid subdomain characters
+    // (most notably that it does not have a '.' or '_')
+    if (/^[a-z0-9-]+$/i.test(bucket)) {
+        return ['', bucket + '.s3.amazonaws.com'];
+    }
+
+    // Otherwise use region-specific handling rules
+    // (TODO: handle other regional exceptions)
+    // http://www.wryway.com/blog/aws-s3-url-styles/
+    if (!region || 'us-east-1' === region) {
+        return ['s3.amazonaws.com'];
+    }
+    return ['s3-' + region + '.amazonaws.com'];
+}
+
 module.exports = S3 = {
     // HEAD
     head: function (
-        { host, accessKeyId, secretAccessKey, region, bucket, prefix, key },
+        {
+            host,
+            accessKeyId,
+            secretAccessKey,
+            region,
+            bucket,
+            prefix,
+            key,
+            ...requestOpts
+        },
         _sign
     ) {
         // TODO support minio
@@ -39,9 +69,10 @@ module.exports = S3 = {
             // whatever/ => whatever/
             prefix = prefix.replace(/\/?$/, '/');
         }
+        var [host, defaultHost] = toAwsBucketHost(host, bucket, region);
         var signed = aws4.sign(
             {
-                host: host || bucket + '.s3.amazonaws.com',
+                host: host || defaultHost,
                 service: 's3',
                 region: region,
                 path: (host ? '/' + bucket : '') + '/' + prefix + key,
@@ -55,7 +86,9 @@ module.exports = S3 = {
             return url;
         }

-        return request({ method: 'HEAD', url }).then(function (resp) {
+        return request(
+            Object.assign(requestOpts, { method: 'HEAD', url })
+        ).then(function (resp) {
             if (200 === resp.statusCode) {
                 resp.url = url;
                 return resp;
@@ -81,7 +114,8 @@ module.exports = S3 = {
             bucket,
             prefix,
             key,
-            json
+            json,
+            ...requestOpts
         },
         _sign
     ) {
@@ -89,9 +123,10 @@ module.exports = S3 = {
         if (prefix) {
             prefix = prefix.replace(/\/?$/, '/');
         }
+        var [host, defaultHost] = toAwsBucketHost(host, bucket, region);
         var signed = aws4.sign(
             {
-                host: host || bucket + '.s3.amazonaws.com',
+                host: host || defaultHost,
                 service: 's3',
                 region: region,
                 path: (host ? '/' + bucket : '') + '/' + prefix + key,
@@ -110,12 +145,14 @@ module.exports = S3 = {
         if (json) {
             encoding = undefined;
         }
-        return request({
-            method: 'GET',
-            url,
-            encoding: encoding,
-            json: json
-        }).then(function (resp) {
+        return request(
+            Object.assign(requestOpts, {
+                method: 'GET',
+                url,
+                encoding: encoding,
+                json: json
+            })
+        ).then(function (resp) {
             if (200 === resp.statusCode) {
                 resp.url = url;
                 return resp;
@@ -142,7 +179,8 @@ module.exports = S3 = {
             prefix,
             key,
             body,
-            size
+            size,
+            ...requestOpts
         },
         _sign
     ) {
@@ -150,9 +188,10 @@ module.exports = S3 = {
         if (prefix) {
             prefix = prefix.replace(/\/?$/, '/');
         }
+        var [host, defaultHost] = toAwsBucketHost(host, bucket, region);
         var signed = aws4.sign(
             {
-                host: host || bucket + '.s3.amazonaws.com',
+                host: host || defaultHost,
                 service: 's3',
                 region: region,
                 path: (host ? '/' + bucket : '') + '/' + prefix + key,
@@ -167,9 +206,9 @@ module.exports = S3 = {
             headers['Content-Length'] = size;
         }

-        return request({ method: 'PUT', url, body, headers }).then(function (
-            resp
-        ) {
+        return request(
+            Object.assign(requestOpts, { method: 'PUT', url, body, headers })
+        ).then(function (resp) {
             if (200 === resp.statusCode) {
                 resp.url = url;
                 return resp;
@@ -186,17 +225,27 @@ module.exports = S3 = {
     },

     // DELETE
-    del: function (
-        { host, accessKeyId, secretAccessKey, region, bucket, prefix, key },
+    delete: function (
+        {
+            host,
+            accessKeyId,
+            secretAccessKey,
+            region,
+            bucket,
+            prefix,
+            key,
+            ...requestOpts
+        },
         _sign
     ) {
         prefix = prefix || '';
         if (prefix) {
             prefix = prefix.replace(/\/?$/, '/');
         }
+        var [host, defaultHost] = toAwsBucketHost(host, bucket, region);
         var signed = aws4.sign(
             {
-                host: host || bucket + '.s3.amazonaws.com',
+                host: host || defaultHost,
                 service: 's3',
                 region: region,
                 path: (host ? '/' + bucket : '') + '/' + prefix + key,
@@ -207,7 +256,9 @@ module.exports = S3 = {
         );
         var url = 'https://' + signed.host + signed.path;

-        return request({ method: 'DELETE', url }).then(function (resp) {
+        return request(
+            Object.assign(requestOpts, { method: 'DELETE', url })
+        ).then(function (resp) {
             if (204 === resp.statusCode) {
                 resp.url = url;
                 return resp;
@@ -246,3 +297,4 @@ module.exports = S3 = {
         }
     }
 };
+S3.del = S3.delete;
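
The new `...requestOpts` rest pattern collects any options beyond the documented ones and merges them into the underlying `@root/request` call with `Object.assign`, so extras such as the `stream` option shown in the README flow through while the library's own `method` and `url` always win (they are assigned last). A minimal sketch of that merge; the option values here are illustrative:

```js
// requestOpts stands in for whatever extra keys a caller supplied
// beyond the documented ones (here: a made-up header and a stream flag).
var requestOpts = { stream: true, headers: { 'x-example': 'illustrative' } };

// The library's own fields are applied last, so they cannot be overridden.
// Object.assign mutates requestOpts in place, which is harmless here
// because it is a fresh rest object on every call.
var merged = Object.assign(requestOpts, { method: 'GET', url: 'https://…' });

console.log(merged.method); // 'GET'  (always the library's choice)
console.log(merged.stream); // true   (caller's option passed through)
```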

package-lock.json generated
View File

@@ -1,13 +1,13 @@
 {
   "name": "@root/s3",
-  "version": "1.1.3",
+  "version": "1.2.0",
   "lockfileVersion": 1,
   "requires": true,
   "dependencies": {
     "@root/request": {
-      "version": "1.5.0",
-      "resolved": "https://registry.npmjs.org/@root/request/-/request-1.5.0.tgz",
-      "integrity": "sha512-J9RUIwVU99/cOVuDVYlNpr4G0A1/3ZxhCXIRiTZzu8RntOnb0lmDBMckhaus5ry9x/dBqJKDplFIgwHbLi6rLA=="
+      "version": "1.7.0",
+      "resolved": "https://registry.npmjs.org/@root/request/-/request-1.7.0.tgz",
+      "integrity": "sha512-lre7XVeEwszgyrayWWb/kRn5fuJfa+n0Nh+rflM9E+EpC28yIYA+FPm/OL1uhzp3TxhQM0HFN4FE2RDIPGlnmg=="
     },
     "aws4": {
       "version": "1.9.1",

View File

@@ -1,6 +1,6 @@
 {
   "name": "@root/s3",
-  "version": "1.1.3",
+  "version": "1.2.0",
   "description": "A simple, lightweight s3 client with only 2 dependencies",
   "main": "index.js",
   "bin": {
@@ -13,6 +13,7 @@
     "example": "examples"
   },
   "scripts": {
+    "prettier": "npx prettier -w '**/*.js'",
     "test": "node test.js"
   },
   "repository": {
@@ -27,7 +28,7 @@
   "author": "AJ ONeal <coolaj86@gmail.com> (https://coolaj86.com/)",
   "license": "(MIT OR Apache-2.0)",
   "dependencies": {
-    "@root/request": "^1.5.0",
+    "@root/request": "^1.7.0",
     "aws4": "^1.9.1"
   },
   "devDependencies": {