Commit bf137ad8 authored by Patrick's avatar Patrick
Browse files

CHANGES.md, README.md und 248 weitere dateien aktualisiert...

parent 0081cef7
Showing with 568 additions and 1673 deletions
+568 -1673
# assert-plus Changelog # assert-plus Changelog
## 1.0.0
- *BREAKING* assert.number (and derivatives) now accept Infinity as valid input
- Add assert.finite check. Previous assert.number callers should use this if
they expect Infinity inputs to throw.
## 0.2.0 ## 0.2.0
- Fix `assert.object(null)` so it throws - Fix `assert.object(null)` so it throws
......
...@@ -74,48 +74,41 @@ The complete list of APIs is: ...@@ -74,48 +74,41 @@ The complete list of APIs is:
* assert.buffer * assert.buffer
* assert.func * assert.func
* assert.number * assert.number
* assert.finite
* assert.object * assert.object
* assert.string * assert.string
* assert.stream * assert.stream
* assert.date * assert.date
* assert.regexp * assert.regex
* assert.uuid * assert.uuid
* assert.arrayOfArray * assert.arrayOfArray
* assert.arrayOfBool * assert.arrayOfBool
* assert.arrayOfBuffer * assert.arrayOfBuffer
* assert.arrayOfFunc * assert.arrayOfFunc
* assert.arrayOfNumber * assert.arrayOfNumber
* assert.arrayOfFinite
* assert.arrayOfObject * assert.arrayOfObject
* assert.arrayOfString * assert.arrayOfString
* assert.arrayOfStream * assert.arrayOfStream
* assert.arrayOfDate * assert.arrayOfDate
* assert.arrayOfRegexp
* assert.arrayOfUuid * assert.arrayOfUuid
* assert.optionalArray * assert.optionalArray
* assert.optionalBool * assert.optionalBool
* assert.optionalBuffer * assert.optionalBuffer
* assert.optionalFunc * assert.optionalFunc
* assert.optionalNumber * assert.optionalNumber
* assert.optionalFinite
* assert.optionalObject * assert.optionalObject
* assert.optionalString * assert.optionalString
* assert.optionalStream * assert.optionalStream
* assert.optionalDate * assert.optionalDate
* assert.optionalRegexp
* assert.optionalUuid * assert.optionalUuid
* assert.optionalArrayOfArray * assert.optionalArrayOfArray
* assert.optionalArrayOfBool * assert.optionalArrayOfBool
* assert.optionalArrayOfBuffer * assert.optionalArrayOfBuffer
* assert.optionalArrayOfFunc * assert.optionalArrayOfFunc
* assert.optionalArrayOfNumber * assert.optionalArrayOfNumber
* assert.optionalArrayOfFinite
* assert.optionalArrayOfObject * assert.optionalArrayOfObject
* assert.optionalArrayOfString * assert.optionalArrayOfString
* assert.optionalArrayOfStream * assert.optionalArrayOfStream
* assert.optionalArrayOfDate * assert.optionalArrayOfDate
* assert.optionalArrayOfRegexp
* assert.optionalArrayOfUuid * assert.optionalArrayOfUuid
* assert.AssertionError * assert.AssertionError
* assert.fail * assert.fail
......
...@@ -55,11 +55,6 @@ var types = { ...@@ -55,11 +55,6 @@ var types = {
} }
}, },
number: { number: {
check: function (arg) {
return typeof (arg) === 'number' && !isNaN(arg);
}
},
finite: {
check: function (arg) { check: function (arg) {
return typeof (arg) === 'number' && !isNaN(arg) && isFinite(arg); return typeof (arg) === 'number' && !isNaN(arg) && isFinite(arg);
} }
......
{ {
"_from": "assert-plus@^1.0.0", "_from": "assert-plus@^0.2.0",
"_id": "assert-plus@1.0.0", "_id": "assert-plus@0.2.0",
"_inBundle": false, "_inBundle": false,
"_integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=", "_integrity": "sha1-104bh+ev/A24qttwIfP+SBAasjQ=",
"_location": "/assert-plus", "_location": "/assert-plus",
"_phantomChildren": {}, "_phantomChildren": {},
"_requested": { "_requested": {
"type": "range", "type": "range",
"registry": true, "registry": true,
"raw": "assert-plus@^1.0.0", "raw": "assert-plus@^0.2.0",
"name": "assert-plus", "name": "assert-plus",
"escapedName": "assert-plus", "escapedName": "assert-plus",
"rawSpec": "^1.0.0", "rawSpec": "^0.2.0",
"saveSpec": null, "saveSpec": null,
"fetchSpec": "^1.0.0" "fetchSpec": "^0.2.0"
}, },
"_requiredBy": [ "_requiredBy": [
"/dashdash", "/http-signature"
"/getpass",
"/http-signature",
"/jsprim",
"/sshpk",
"/verror"
], ],
"_resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", "_resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-0.2.0.tgz",
"_shasum": "f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525", "_shasum": "d74e1b87e7affc0db8aadb7021f3fe48101ab234",
"_spec": "assert-plus@^1.0.0", "_spec": "assert-plus@^0.2.0",
"_where": "C:\\Work\\OneDrive - bwstaff\\M4_Lab\\TV3\\NewVersion01\\dev\\node_modules\\http-signature", "_where": "C:\\Work\\OneDrive - bwstaff\\M4_Lab\\Main\\02_Plattform_Main\\m4labplatform\\node_modules\\http-signature",
"author": { "author": {
"name": "Mark Cavage", "name": "Mark Cavage",
"email": "mcavage@gmail.com" "email": "mcavage@gmail.com"
...@@ -83,5 +78,5 @@ ...@@ -83,5 +78,5 @@
"scripts": { "scripts": {
"test": "tape tests/*.js | ./node_modules/.bin/faucet" "test": "tape tests/*.js | ./node_modules/.bin/faucet"
}, },
"version": "1.0.0" "version": "0.2.0"
} }
...@@ -133,7 +133,7 @@ function stringToSign (options) { ...@@ -133,7 +133,7 @@ function stringToSign (options) {
] ]
return r.join('\n') return r.join('\n')
} }
module.exports.stringToSign = stringToSign module.exports.queryStringToSign = stringToSign
/** /**
* Return a string for sign() with the given `options`, but is meant exclusively * Return a string for sign() with the given `options`, but is meant exclusively
......
{ {
"_from": "aws-sign2@~0.7.0", "_from": "aws-sign2@~0.6.0",
"_id": "aws-sign2@0.7.0", "_id": "aws-sign2@0.6.0",
"_inBundle": false, "_inBundle": false,
"_integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=", "_integrity": "sha1-FDQt0428yU0OW4fXY81jYSwOeU8=",
"_location": "/aws-sign2", "_location": "/aws-sign2",
"_phantomChildren": {}, "_phantomChildren": {},
"_requested": { "_requested": {
"type": "range", "type": "range",
"registry": true, "registry": true,
"raw": "aws-sign2@~0.7.0", "raw": "aws-sign2@~0.6.0",
"name": "aws-sign2", "name": "aws-sign2",
"escapedName": "aws-sign2", "escapedName": "aws-sign2",
"rawSpec": "~0.7.0", "rawSpec": "~0.6.0",
"saveSpec": null, "saveSpec": null,
"fetchSpec": "~0.7.0" "fetchSpec": "~0.6.0"
}, },
"_requiredBy": [ "_requiredBy": [
"/request" "/request"
], ],
"_resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", "_resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.6.0.tgz",
"_shasum": "b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8", "_shasum": "14342dd38dbcc94d0e5b87d763cd63612c0e794f",
"_spec": "aws-sign2@~0.7.0", "_spec": "aws-sign2@~0.6.0",
"_where": "C:\\Work\\OneDrive - bwstaff\\M4_Lab\\TV3\\NewVersion01\\dev\\node_modules\\request", "_where": "C:\\Work\\OneDrive - bwstaff\\M4_Lab\\Main\\02_Plattform_Main\\m4labplatform\\node_modules\\request",
"author": { "author": {
"name": "Mikeal Rogers", "name": "Mikeal Rogers",
"email": "mikeal.rogers@gmail.com", "email": "mikeal.rogers@gmail.com",
...@@ -46,5 +46,5 @@ ...@@ -46,5 +46,5 @@
"repository": { "repository": {
"url": "git+https://github.com/mikeal/aws-sign.git" "url": "git+https://github.com/mikeal/aws-sign.git"
}, },
"version": "0.7.0" "version": "0.6.0"
} }
aws4 aws4
---- ----
[![Build Status](https://secure.travis-ci.org/mhart/aws4.png?branch=master)](http://travis-ci.org/mhart/aws4) [![Build Status](https://api.travis-ci.org/mhart/aws4.png?branch=master)](https://travis-ci.org/github/mhart/aws4)
A small utility to sign vanilla Node.js http(s) request options using Amazon's A small utility to sign vanilla Node.js http(s) request options using Amazon's
[AWS Signature Version 4](http://docs.amazonwebservices.com/general/latest/gr/signature-version-4.html). [AWS Signature Version 4](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
If you want to sign and send AWS requests in a modern browser, or an environment like [Cloudflare Workers](https://developers.cloudflare.com/workers/), then check out [aws4fetch](https://github.com/mhart/aws4fetch) – otherwise you can also bundle this library for use [in the browser](./browser). If you want to sign and send AWS requests in a modern browser, or an environment like [Cloudflare Workers](https://developers.cloudflare.com/workers/), then check out [aws4fetch](https://github.com/mhart/aws4fetch) – otherwise you can also bundle this library for use [in older browsers](./browser).
This signature is supported by nearly all Amazon services, including The only AWS service that *doesn't* support v4 as of 2020-05-22 is
[S3](http://docs.aws.amazon.com/AmazonS3/latest/API/), [SimpleDB](https://docs.aws.amazon.com/AmazonSimpleDB/latest/DeveloperGuide/SDB_API.html)
[EC2](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/), (it only supports [AWS Signature Version 2](https://github.com/mhart/aws2)).
[DynamoDB](http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/API.html),
[Kinesis](http://docs.aws.amazon.com/kinesis/latest/APIReference/),
[Lambda](http://docs.aws.amazon.com/lambda/latest/dg/API_Reference.html),
[SQS](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/),
[SNS](http://docs.aws.amazon.com/sns/latest/api/),
[IAM](http://docs.aws.amazon.com/IAM/latest/APIReference/),
[STS](http://docs.aws.amazon.com/STS/latest/APIReference/),
[RDS](http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/),
[CloudWatch](http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/),
[CloudWatch Logs](http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/),
[CodeDeploy](http://docs.aws.amazon.com/codedeploy/latest/APIReference/),
[CloudFront](http://docs.aws.amazon.com/AmazonCloudFront/latest/APIReference/),
[CloudTrail](http://docs.aws.amazon.com/awscloudtrail/latest/APIReference/),
[ElastiCache](http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/),
[EMR](http://docs.aws.amazon.com/ElasticMapReduce/latest/API/),
[Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/amazon-glacier-api.html),
[CloudSearch](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/APIReq.html),
[Elastic Load Balancing](http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/),
[Elastic Transcoder](http://docs.aws.amazon.com/elastictranscoder/latest/developerguide/api-reference.html),
[CloudFormation](http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/),
[Elastic Beanstalk](http://docs.aws.amazon.com/elasticbeanstalk/latest/api/),
[Storage Gateway](http://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewayAPI.html),
[Data Pipeline](http://docs.aws.amazon.com/datapipeline/latest/APIReference/),
[Direct Connect](http://docs.aws.amazon.com/directconnect/latest/APIReference/),
[Redshift](http://docs.aws.amazon.com/redshift/latest/APIReference/),
[OpsWorks](http://docs.aws.amazon.com/opsworks/latest/APIReference/),
[SES](http://docs.aws.amazon.com/ses/latest/APIReference/),
[SWF](http://docs.aws.amazon.com/amazonswf/latest/apireference/),
[AutoScaling](http://docs.aws.amazon.com/AutoScaling/latest/APIReference/),
[Mobile Analytics](http://docs.aws.amazon.com/mobileanalytics/latest/ug/server-reference.html),
[Cognito Identity](http://docs.aws.amazon.com/cognitoidentity/latest/APIReference/),
[Cognito Sync](http://docs.aws.amazon.com/cognitosync/latest/APIReference/),
[Container Service](http://docs.aws.amazon.com/AmazonECS/latest/APIReference/),
[AppStream](http://docs.aws.amazon.com/appstream/latest/developerguide/appstream-api-rest.html),
[Key Management Service](http://docs.aws.amazon.com/kms/latest/APIReference/),
[Config](http://docs.aws.amazon.com/config/latest/APIReference/),
[CloudHSM](http://docs.aws.amazon.com/cloudhsm/latest/dg/api-ref.html),
[Route53](http://docs.aws.amazon.com/Route53/latest/APIReference/requests-rest.html) and
[Route53 Domains](http://docs.aws.amazon.com/Route53/latest/APIReference/requests-rpc.html).
Indeed, the only AWS services that *don't* support v4 as of 2014-12-30 are
[Import/Export](http://docs.aws.amazon.com/AWSImportExport/latest/DG/api-reference.html) and
[SimpleDB](http://docs.aws.amazon.com/AmazonSimpleDB/latest/DeveloperGuide/SDB_API.html)
(they only support [AWS Signature Version 2](https://github.com/mhart/aws2)).
It also provides defaults for a number of core AWS headers and It also provides defaults for a number of core AWS headers and
request parameters, making it very easy to query AWS services, or request parameters, making it very easy to query AWS services, or
...@@ -64,21 +20,34 @@ Example ...@@ -64,21 +20,34 @@ Example
------- -------
```javascript ```javascript
var http = require('http'), var https = require('https')
https = require('https'), var aws4 = require('aws4')
aws4 = require('aws4')
// to illustrate usage, we'll create a utility function to request and pipe to stdout
function request(opts) { https.request(opts, function(res) { res.pipe(process.stdout) }).end(opts.body || '') }
// aws4 will sign an options object as you'd pass to http.request, with an AWS service and region
var opts = { host: 'my-bucket.s3.us-west-1.amazonaws.com', path: '/my-object', service: 's3', region: 'us-west-1' }
// aws4.sign() will sign and modify these options, ready to pass to http.request
aws4.sign(opts, { accessKeyId: '', secretAccessKey: '' })
// or it can get credentials from process.env.AWS_ACCESS_KEY_ID, etc
aws4.sign(opts)
// given an options object you could pass to http.request // for most AWS services, aws4 can figure out the service and region if you pass a host
var opts = {host: 'sqs.us-east-1.amazonaws.com', path: '/?Action=ListQueues'} opts = { host: 'my-bucket.s3.us-west-1.amazonaws.com', path: '/my-object' }
// alternatively (as aws4 can infer the host): // usually it will add/modify request headers, but you can also sign the query:
opts = {service: 'sqs', region: 'us-east-1', path: '/?Action=ListQueues'} opts = { host: 'my-bucket.s3.amazonaws.com', path: '/?X-Amz-Expires=12345', signQuery: true }
// alternatively (as us-east-1 is default): // and for services with simple hosts, aws4 can infer the host from service and region:
opts = {service: 'sqs', path: '/?Action=ListQueues'} opts = { service: 'sqs', region: 'us-east-1', path: '/?Action=ListQueues' }
aws4.sign(opts) // assumes AWS credentials are available in process.env // and if you're using us-east-1, it's the default:
opts = { service: 'sqs', path: '/?Action=ListQueues' }
aws4.sign(opts)
console.log(opts) console.log(opts)
/* /*
{ {
...@@ -92,37 +61,23 @@ console.log(opts) ...@@ -92,37 +61,23 @@ console.log(opts)
} }
*/ */
// we can now use this to query AWS using the standard node.js http API // we can now use this to query AWS
http.request(opts, function(res) { res.pipe(process.stdout) }).end() request(opts)
/* /*
<?xml version="1.0"?> <?xml version="1.0"?>
<ListQueuesResponse xmlns="http://queue.amazonaws.com/doc/2012-11-05/"> <ListQueuesResponse xmlns="https://queue.amazonaws.com/doc/2012-11-05/">
... ...
*/ */
```
More options
------------
```javascript
// you can also pass AWS credentials in explicitly (otherwise taken from process.env)
aws4.sign(opts, {accessKeyId: '', secretAccessKey: ''})
// can also add the signature to query strings
aws4.sign({service: 's3', path: '/my-bucket?X-Amz-Expires=12345', signQuery: true})
// create a utility function to pipe to stdout (with https this time)
function request(o) { https.request(o, function(res) { res.pipe(process.stdout) }).end(o.body || '') }
// aws4 can infer the HTTP method if a body is passed in // aws4 can infer the HTTP method if a body is passed in
// method will be POST and Content-Type: 'application/x-www-form-urlencoded; charset=utf-8' // method will be POST and Content-Type: 'application/x-www-form-urlencoded; charset=utf-8'
request(aws4.sign({service: 'iam', body: 'Action=ListGroups&Version=2010-05-08'})) request(aws4.sign({ service: 'iam', body: 'Action=ListGroups&Version=2010-05-08' }))
/* /*
<ListGroupsResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/"> <ListGroupsResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
... ...
*/ */
// can specify any custom option or header as per usual // you can specify any custom option or header as per usual
request(aws4.sign({ request(aws4.sign({
service: 'dynamodb', service: 'dynamodb',
region: 'ap-southeast-2', region: 'ap-southeast-2',
...@@ -139,303 +94,7 @@ request(aws4.sign({ ...@@ -139,303 +94,7 @@ request(aws4.sign({
... ...
*/ */
// works with all other services that support Signature Version 4 // The raw RequestSigner can be used to generate CodeCommit Git passwords
request(aws4.sign({service: 's3', path: '/', signQuery: true}))
/*
<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
...
*/
request(aws4.sign({service: 'ec2', path: '/?Action=DescribeRegions&Version=2014-06-15'}))
/*
<DescribeRegionsResponse xmlns="http://ec2.amazonaws.com/doc/2014-06-15/">
...
*/
request(aws4.sign({service: 'sns', path: '/?Action=ListTopics&Version=2010-03-31'}))
/*
<ListTopicsResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
...
*/
request(aws4.sign({service: 'sts', path: '/?Action=GetSessionToken&Version=2011-06-15'}))
/*
<GetSessionTokenResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
...
*/
request(aws4.sign({service: 'cloudsearch', path: '/?Action=ListDomainNames&Version=2013-01-01'}))
/*
<ListDomainNamesResponse xmlns="http://cloudsearch.amazonaws.com/doc/2013-01-01/">
...
*/
request(aws4.sign({service: 'ses', path: '/?Action=ListIdentities&Version=2010-12-01'}))
/*
<ListIdentitiesResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
...
*/
request(aws4.sign({service: 'autoscaling', path: '/?Action=DescribeAutoScalingInstances&Version=2011-01-01'}))
/*
<DescribeAutoScalingInstancesResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
...
*/
request(aws4.sign({service: 'elasticloadbalancing', path: '/?Action=DescribeLoadBalancers&Version=2012-06-01'}))
/*
<DescribeLoadBalancersResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
...
*/
request(aws4.sign({service: 'cloudformation', path: '/?Action=ListStacks&Version=2010-05-15'}))
/*
<ListStacksResponse xmlns="http://cloudformation.amazonaws.com/doc/2010-05-15/">
...
*/
request(aws4.sign({service: 'elasticbeanstalk', path: '/?Action=ListAvailableSolutionStacks&Version=2010-12-01'}))
/*
<ListAvailableSolutionStacksResponse xmlns="http://elasticbeanstalk.amazonaws.com/docs/2010-12-01/">
...
*/
request(aws4.sign({service: 'rds', path: '/?Action=DescribeDBInstances&Version=2012-09-17'}))
/*
<DescribeDBInstancesResponse xmlns="http://rds.amazonaws.com/doc/2012-09-17/">
...
*/
request(aws4.sign({service: 'monitoring', path: '/?Action=ListMetrics&Version=2010-08-01'}))
/*
<ListMetricsResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
...
*/
request(aws4.sign({service: 'redshift', path: '/?Action=DescribeClusters&Version=2012-12-01'}))
/*
<DescribeClustersResponse xmlns="http://redshift.amazonaws.com/doc/2012-12-01/">
...
*/
request(aws4.sign({service: 'cloudfront', path: '/2014-05-31/distribution'}))
/*
<DistributionList xmlns="http://cloudfront.amazonaws.com/doc/2014-05-31/">
...
*/
request(aws4.sign({service: 'elasticache', path: '/?Action=DescribeCacheClusters&Version=2014-07-15'}))
/*
<DescribeCacheClustersResponse xmlns="http://elasticache.amazonaws.com/doc/2014-07-15/">
...
*/
request(aws4.sign({service: 'elasticmapreduce', path: '/?Action=DescribeJobFlows&Version=2009-03-31'}))
/*
<DescribeJobFlowsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
...
*/
request(aws4.sign({service: 'route53', path: '/2013-04-01/hostedzone'}))
/*
<ListHostedZonesResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
...
*/
request(aws4.sign({service: 'appstream', path: '/applications'}))
/*
{"_links":{"curie":[{"href":"http://docs.aws.amazon.com/appstream/latest/...
...
*/
request(aws4.sign({service: 'cognito-sync', path: '/identitypools'}))
/*
{"Count":0,"IdentityPoolUsages":[],"MaxResults":16,"NextToken":null}
...
*/
request(aws4.sign({service: 'elastictranscoder', path: '/2012-09-25/pipelines'}))
/*
{"NextPageToken":null,"Pipelines":[]}
...
*/
request(aws4.sign({service: 'lambda', path: '/2014-11-13/functions/'}))
/*
{"Functions":[],"NextMarker":null}
...
*/
request(aws4.sign({service: 'ecs', path: '/?Action=ListClusters&Version=2014-11-13'}))
/*
<ListClustersResponse xmlns="http://ecs.amazonaws.com/doc/2014-11-13/">
...
*/
request(aws4.sign({service: 'glacier', path: '/-/vaults', headers: {'X-Amz-Glacier-Version': '2012-06-01'}}))
/*
{"Marker":null,"VaultList":[]}
...
*/
request(aws4.sign({service: 'storagegateway', body: '{}', headers: {
'Content-Type': 'application/x-amz-json-1.1',
'X-Amz-Target': 'StorageGateway_20120630.ListGateways'
}}))
/*
{"Gateways":[]}
...
*/
request(aws4.sign({service: 'datapipeline', body: '{}', headers: {
'Content-Type': 'application/x-amz-json-1.1',
'X-Amz-Target': 'DataPipeline.ListPipelines'
}}))
/*
{"hasMoreResults":false,"pipelineIdList":[]}
...
*/
request(aws4.sign({service: 'opsworks', body: '{}', headers: {
'Content-Type': 'application/x-amz-json-1.1',
'X-Amz-Target': 'OpsWorks_20130218.DescribeStacks'
}}))
/*
{"Stacks":[]}
...
*/
request(aws4.sign({service: 'route53domains', body: '{}', headers: {
'Content-Type': 'application/x-amz-json-1.1',
'X-Amz-Target': 'Route53Domains_v20140515.ListDomains'
}}))
/*
{"Domains":[]}
...
*/
request(aws4.sign({service: 'kinesis', body: '{}', headers: {
'Content-Type': 'application/x-amz-json-1.1',
'X-Amz-Target': 'Kinesis_20131202.ListStreams'
}}))
/*
{"HasMoreStreams":false,"StreamNames":[]}
...
*/
request(aws4.sign({service: 'cloudtrail', body: '{}', headers: {
'Content-Type': 'application/x-amz-json-1.1',
'X-Amz-Target': 'CloudTrail_20131101.DescribeTrails'
}}))
/*
{"trailList":[]}
...
*/
request(aws4.sign({service: 'logs', body: '{}', headers: {
'Content-Type': 'application/x-amz-json-1.1',
'X-Amz-Target': 'Logs_20140328.DescribeLogGroups'
}}))
/*
{"logGroups":[]}
...
*/
request(aws4.sign({service: 'codedeploy', body: '{}', headers: {
'Content-Type': 'application/x-amz-json-1.1',
'X-Amz-Target': 'CodeDeploy_20141006.ListApplications'
}}))
/*
{"applications":[]}
...
*/
request(aws4.sign({service: 'directconnect', body: '{}', headers: {
'Content-Type': 'application/x-amz-json-1.1',
'X-Amz-Target': 'OvertureService.DescribeConnections'
}}))
/*
{"connections":[]}
...
*/
request(aws4.sign({service: 'kms', body: '{}', headers: {
'Content-Type': 'application/x-amz-json-1.1',
'X-Amz-Target': 'TrentService.ListKeys'
}}))
/*
{"Keys":[],"Truncated":false}
...
*/
request(aws4.sign({service: 'config', body: '{}', headers: {
'Content-Type': 'application/x-amz-json-1.1',
'X-Amz-Target': 'StarlingDoveService.DescribeDeliveryChannels'
}}))
/*
{"DeliveryChannels":[]}
...
*/
request(aws4.sign({service: 'cloudhsm', body: '{}', headers: {
'Content-Type': 'application/x-amz-json-1.1',
'X-Amz-Target': 'CloudHsmFrontendService.ListAvailableZones'
}}))
/*
{"AZList":["us-east-1a","us-east-1b","us-east-1c"]}
...
*/
request(aws4.sign({
service: 'swf',
body: '{"registrationStatus":"REGISTERED"}',
headers: {
'Content-Type': 'application/x-amz-json-1.0',
'X-Amz-Target': 'SimpleWorkflowService.ListDomains'
}
}))
/*
{"domainInfos":[]}
...
*/
request(aws4.sign({
service: 'cognito-identity',
body: '{"MaxResults": 1}',
headers: {
'Content-Type': 'application/x-amz-json-1.1',
'X-Amz-Target': 'AWSCognitoIdentityService.ListIdentityPools'
}
}))
/*
{"IdentityPools":[]}
...
*/
request(aws4.sign({
service: 'mobileanalytics',
path: '/2014-06-05/events',
body: JSON.stringify({events:[{
eventType: 'a',
timestamp: new Date().toISOString(),
session: {},
}]}),
headers: {
'Content-Type': 'application/json',
'X-Amz-Client-Context': JSON.stringify({
client: {client_id: 'a', app_title: 'a'},
custom: {},
env: {platform: 'a'},
services: {},
}),
}
}))
/*
(HTTP 202, empty response)
*/
// Generate CodeCommit Git access password
var signer = new aws4.RequestSigner({ var signer = new aws4.RequestSigner({
service: 'codecommit', service: 'codecommit',
host: 'git-codecommit.us-east-1.amazonaws.com', host: 'git-codecommit.us-east-1.amazonaws.com',
...@@ -443,6 +102,8 @@ var signer = new aws4.RequestSigner({ ...@@ -443,6 +102,8 @@ var signer = new aws4.RequestSigner({
path: '/v1/repos/MyAwesomeRepo', path: '/v1/repos/MyAwesomeRepo',
}) })
var password = signer.getDateTime() + 'Z' + signer.signature() var password = signer.getDateTime() + 'Z' + signer.signature()
// see example.js for examples with other services
``` ```
API API
...@@ -450,23 +111,23 @@ API ...@@ -450,23 +111,23 @@ API
### aws4.sign(requestOptions, [credentials]) ### aws4.sign(requestOptions, [credentials])
This calculates and populates the `Authorization` header of Calculates and populates any necessary AWS headers and/or request
`requestOptions`, and any other necessary AWS headers and/or request options on `requestOptions`. Returns `requestOptions` as a convenience for chaining.
options. Returns `requestOptions` as a convenience for chaining.
`requestOptions` is an object holding the same options that the node.js `requestOptions` is an object holding the same options that the Node.js
[http.request](http://nodejs.org/docs/latest/api/http.html#http_http_request_options_callback) [http.request](https://nodejs.org/docs/latest/api/http.html#http_http_request_options_callback)
function takes. function takes.
The following properties of `requestOptions` are used in the signing or The following properties of `requestOptions` are used in the signing or
populated if they don't already exist: populated if they don't already exist:
- `hostname` or `host` (will be determined from `service` and `region` if not given) - `hostname` or `host` (will try to be determined from `service` and `region` if not given)
- `method` (will use `'GET'` if not given or `'POST'` if there is a `body`) - `method` (will use `'GET'` if not given or `'POST'` if there is a `body`)
- `path` (will use `'/'` if not given) - `path` (will use `'/'` if not given)
- `body` (will use `''` if not given) - `body` (will use `''` if not given)
- `service` (will be calculated from `hostname` or `host` if not given) - `service` (will try to be calculated from `hostname` or `host` if not given)
- `region` (will be calculated from `hostname` or `host` or use `'us-east-1'` if not given) - `region` (will try to be calculated from `hostname` or `host` or use `'us-east-1'` if not given)
- `signQuery` (to sign the query instead of adding an `Authorization` header, defaults to false)
- `headers['Host']` (will use `hostname` or `host` or be calculated if not given) - `headers['Host']` (will use `hostname` or `host` or be calculated if not given)
- `headers['Content-Type']` (will use `'application/x-www-form-urlencoded; charset=utf-8'` - `headers['Content-Type']` (will use `'application/x-www-form-urlencoded; charset=utf-8'`
if not given and there is a `body`) if not given and there is a `body`)
...@@ -489,20 +150,20 @@ aws4.sign(requestOptions, { ...@@ -489,20 +150,20 @@ aws4.sign(requestOptions, {
- From `process.env`, such as this: - From `process.env`, such as this:
``` ```
export AWS_SECRET_ACCESS_KEY="<your-secret-access-key>"
export AWS_ACCESS_KEY_ID="<your-access-key-id>" export AWS_ACCESS_KEY_ID="<your-access-key-id>"
export AWS_SECRET_ACCESS_KEY="<your-secret-access-key>"
export AWS_SESSION_TOKEN="<your-session-token>" export AWS_SESSION_TOKEN="<your-session-token>"
``` ```
(will also use `AWS_ACCESS_KEY` and `AWS_SECRET_KEY` if available) (will also use `AWS_ACCESS_KEY` and `AWS_SECRET_KEY` if available)
The `sessionToken` property and `AWS_SESSION_TOKEN` environment variable are optional for signing The `sessionToken` property and `AWS_SESSION_TOKEN` environment variable are optional for signing
with [IAM STS temporary credentials](http://docs.aws.amazon.com/STS/latest/UsingSTS/using-temp-creds.html). with [IAM STS temporary credentials](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html).
Installation Installation
------------ ------------
With [npm](http://npmjs.org/) do: With [npm](https://www.npmjs.com/) do:
``` ```
npm install aws4 npm install aws4
...@@ -518,6 +179,5 @@ Thanks to [@jed](https://github.com/jed) for his ...@@ -518,6 +179,5 @@ Thanks to [@jed](https://github.com/jed) for his
committed and subsequently extracted this code. committed and subsequently extracted this code.
Also thanks to the Also thanks to the
[official node.js AWS SDK](https://github.com/aws/aws-sdk-js) for giving [official Node.js AWS SDK](https://github.com/aws/aws-sdk-js) for giving
me a start on implementing the v4 signature. me a start on implementing the v4 signature.
...@@ -26,6 +26,20 @@ function encodeRfc3986Full(str) { ...@@ -26,6 +26,20 @@ function encodeRfc3986Full(str) {
return encodeRfc3986(encodeURIComponent(str)) return encodeRfc3986(encodeURIComponent(str))
} }
// A bit of a combination of:
// https://github.com/aws/aws-sdk-java-v2/blob/dc695de6ab49ad03934e1b02e7263abbd2354be0/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AbstractAws4Signer.java#L59
// https://github.com/aws/aws-sdk-js/blob/18cb7e5b463b46239f9fdd4a65e2ff8c81831e8f/lib/signers/v4.js#L191-L199
// https://github.com/mhart/aws4fetch/blob/b3aed16b6f17384cf36ea33bcba3c1e9f3bdfefd/src/main.js#L25-L34
var HEADERS_TO_IGNORE = {
'authorization': true,
'connection': true,
'x-amzn-trace-id': true,
'user-agent': true,
'expect': true,
'presigned-expires': true,
'range': true,
}
// request: { path | body, [host], [method], [headers], [service], [region] } // request: { path | body, [host], [method], [headers], [service], [region] }
// credentials: { accessKeyId, secretAccessKey, [sessionToken] } // credentials: { accessKeyId, secretAccessKey, [sessionToken] }
function RequestSigner(request, credentials) { function RequestSigner(request, credentials) {
...@@ -33,7 +47,7 @@ function RequestSigner(request, credentials) { ...@@ -33,7 +47,7 @@ function RequestSigner(request, credentials) {
if (typeof request === 'string') request = url.parse(request) if (typeof request === 'string') request = url.parse(request)
var headers = request.headers = (request.headers || {}), var headers = request.headers = (request.headers || {}),
hostParts = this.matchHost(request.hostname || request.host || headers.Host || headers.host) hostParts = (!this.service || !this.region) && this.matchHost(request.hostname || request.host || headers.Host || headers.host)
this.request = request this.request = request
this.credentials = credentials || this.defaultCredentials() this.credentials = credentials || this.defaultCredentials()
...@@ -70,6 +84,19 @@ RequestSigner.prototype.matchHost = function(host) { ...@@ -70,6 +84,19 @@ RequestSigner.prototype.matchHost = function(host) {
if (hostParts[1] === 'es') if (hostParts[1] === 'es')
hostParts = hostParts.reverse() hostParts = hostParts.reverse()
if (hostParts[1] == 's3') {
hostParts[0] = 's3'
hostParts[1] = 'us-east-1'
} else {
for (var i = 0; i < 2; i++) {
if (/^s3-/.test(hostParts[i])) {
hostParts[1] = hostParts[i].slice(3)
hostParts[0] = 's3'
break
}
}
}
return hostParts return hostParts
} }
...@@ -83,10 +110,9 @@ RequestSigner.prototype.isSingleRegion = function() { ...@@ -83,10 +110,9 @@ RequestSigner.prototype.isSingleRegion = function() {
} }
RequestSigner.prototype.createHost = function() { RequestSigner.prototype.createHost = function() {
var region = this.isSingleRegion() ? '' : var region = this.isSingleRegion() ? '' : '.' + this.region,
(this.service === 's3' && this.region !== 'us-east-1' ? '-' : '.') + this.region, subdomain = this.service === 'ses' ? 'email' : this.service
service = this.service === 'ses' ? 'email' : this.service return subdomain + region + '.amazonaws.com'
return service + region + '.amazonaws.com'
} }
RequestSigner.prototype.prepareRequest = function() { RequestSigner.prototype.prepareRequest = function() {
...@@ -247,7 +273,7 @@ RequestSigner.prototype.canonicalString = function() { ...@@ -247,7 +273,7 @@ RequestSigner.prototype.canonicalString = function() {
if (normalizePath && piece === '..') { if (normalizePath && piece === '..') {
path.pop() path.pop()
} else if (!normalizePath || piece !== '.') { } else if (!normalizePath || piece !== '.') {
if (decodePath) piece = decodeURIComponent(piece).replace(/\+/g, ' ') if (decodePath) piece = decodeURIComponent(piece.replace(/\+/g, ' '))
path.push(encodeRfc3986Full(piece)) path.push(encodeRfc3986Full(piece))
} }
return path return path
...@@ -272,6 +298,7 @@ RequestSigner.prototype.canonicalHeaders = function() { ...@@ -272,6 +298,7 @@ RequestSigner.prototype.canonicalHeaders = function() {
return header.toString().trim().replace(/\s+/g, ' ') return header.toString().trim().replace(/\s+/g, ' ')
} }
return Object.keys(headers) return Object.keys(headers)
.filter(function(key) { return HEADERS_TO_IGNORE[key.toLowerCase()] == null })
.sort(function(a, b) { return a.toLowerCase() < b.toLowerCase() ? -1 : 1 }) .sort(function(a, b) { return a.toLowerCase() < b.toLowerCase() ? -1 : 1 })
.map(function(key) { return key.toLowerCase() + ':' + trimAll(headers[key]) }) .map(function(key) { return key.toLowerCase() + ':' + trimAll(headers[key]) })
.join('\n') .join('\n')
...@@ -280,6 +307,7 @@ RequestSigner.prototype.canonicalHeaders = function() { ...@@ -280,6 +307,7 @@ RequestSigner.prototype.canonicalHeaders = function() {
RequestSigner.prototype.signedHeaders = function() { RequestSigner.prototype.signedHeaders = function() {
return Object.keys(this.request.headers) return Object.keys(this.request.headers)
.map(function(key) { return key.toLowerCase() }) .map(function(key) { return key.toLowerCase() })
.filter(function(key) { return HEADERS_TO_IGNORE[key] == null })
.sort() .sort()
.join(';') .join(';')
} }
......
{ {
"_from": "aws4@^1.8.0", "_from": "aws4@^1.2.1",
"_id": "aws4@1.9.1", "_id": "aws4@1.11.0",
"_inBundle": false, "_inBundle": false,
"_integrity": "sha512-wMHVg2EOHaMRxbzgFJ9gtjOOCrI80OHLG14rxi28XwOW8ux6IiEbRCGGGqCtdAIg4FQCbW20k9RsT4y3gJlFug==", "_integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==",
"_location": "/aws4", "_location": "/aws4",
"_phantomChildren": {}, "_phantomChildren": {},
"_requested": { "_requested": {
"type": "range", "type": "range",
"registry": true, "registry": true,
"raw": "aws4@^1.8.0", "raw": "aws4@^1.2.1",
"name": "aws4", "name": "aws4",
"escapedName": "aws4", "escapedName": "aws4",
"rawSpec": "^1.8.0", "rawSpec": "^1.2.1",
"saveSpec": null, "saveSpec": null,
"fetchSpec": "^1.8.0" "fetchSpec": "^1.2.1"
}, },
"_requiredBy": [ "_requiredBy": [
"/request" "/request"
], ],
"_resolved": "https://registry.npmjs.org/aws4/-/aws4-1.9.1.tgz", "_resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz",
"_shasum": "7e33d8f7d449b3f673cd72deb9abdc552dbe528e", "_shasum": "d61f46d83b2519250e2784daf5b09479a8b41c59",
"_spec": "aws4@^1.8.0", "_spec": "aws4@^1.2.1",
"_where": "C:\\Work\\OneDrive - bwstaff\\M4_Lab\\TV3\\NewVersion01\\dev\\node_modules\\request", "_where": "C:\\Work\\OneDrive - bwstaff\\M4_Lab\\Main\\02_Plattform_Main\\m4labplatform\\node_modules\\request",
"author": { "author": {
"name": "Michael Hart", "name": "Michael Hart",
"email": "michael.hart.au@gmail.com", "email": "michael.hart.au@gmail.com",
"url": "http://github.com/mhart" "url": "https://github.com/mhart"
}, },
"bugs": { "bugs": {
"url": "https://github.com/mhart/aws4/issues" "url": "https://github.com/mhart/aws4/issues"
...@@ -34,62 +34,10 @@ ...@@ -34,62 +34,10 @@
"deprecated": false, "deprecated": false,
"description": "Signs and prepares requests using AWS Signature Version 4", "description": "Signs and prepares requests using AWS Signature Version 4",
"devDependencies": { "devDependencies": {
"mocha": "^2.4.5", "mocha": "^2.5.3",
"should": "^8.2.2" "should": "^8.4.0"
}, },
"homepage": "https://github.com/mhart/aws4#readme", "homepage": "https://github.com/mhart/aws4#readme",
"keywords": [
"amazon",
"aws",
"signature",
"s3",
"ec2",
"autoscaling",
"cloudformation",
"elasticloadbalancing",
"elb",
"elasticbeanstalk",
"cloudsearch",
"dynamodb",
"kinesis",
"lambda",
"glacier",
"sqs",
"sns",
"iam",
"sts",
"ses",
"swf",
"storagegateway",
"datapipeline",
"directconnect",
"redshift",
"opsworks",
"rds",
"monitoring",
"cloudtrail",
"cloudfront",
"codedeploy",
"elasticache",
"elasticmapreduce",
"elastictranscoder",
"emr",
"cloudwatch",
"mobileanalytics",
"cognitoidentity",
"cognitosync",
"cognito",
"containerservice",
"ecs",
"appstream",
"keymanagementservice",
"kms",
"config",
"cloudhsm",
"route53",
"route53domains",
"logs"
],
"license": "MIT", "license": "MIT",
"main": "aws4.js", "main": "aws4.js",
"name": "aws4", "name": "aws4",
...@@ -99,7 +47,7 @@ ...@@ -99,7 +47,7 @@
}, },
"scripts": { "scripts": {
"integration": "node ./test/slow.js", "integration": "node ./test/slow.js",
"test": "mocha ./test/fast.js -b -t 100s -R list" "test": "mocha ./test/fast.js -R list"
}, },
"version": "1.9.1" "version": "1.11.0"
} }
node_modules/
Copyright (c) Isaac Z. Schlueter ("Author") The ISC License
All rights reserved.
The BSD License Copyright (c) Isaac Z. Schlueter, Ben Noordhuis, and Contributors
Redistribution and use in source and binary forms, with or without Permission to use, copy, modify, and/or distribute this software for any
modification, are permitted provided that the following conditions purpose with or without fee is hereby granted, provided that the above
are met: copyright notice and this permission notice appear in all copies.
1. Redistributions of source code must retain the above copyright THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
notice, this list of conditions and the following disclaimer. WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
2. Redistributions in binary form must reproduce the above copyright ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
notice, this list of conditions and the following disclaimer in the WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
documentation and/or other materials provided with the distribution. ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
...@@ -7,14 +7,11 @@ The improvements are meant to normalize behavior across different ...@@ -7,14 +7,11 @@ The improvements are meant to normalize behavior across different
platforms and environments, and to make filesystem access more platforms and environments, and to make filesystem access more
resilient to errors. resilient to errors.
## Improvements over fs module ## Improvements over [fs module](https://nodejs.org/api/fs.html)
graceful-fs: * Queues up `open` and `readdir` calls, and retries them once
something closes if there is an EMFILE error from too many file
* keeps track of how many file descriptors are open, and by default descriptors.
limits this to 1024. Any further requests to open a file are put in a
queue until new slots become available. If 1024 turns out to be too
much, it decreases the limit further.
* fixes `lchmod` for Node versions prior to 0.6.2. * fixes `lchmod` for Node versions prior to 0.6.2.
* implements `fs.lutimes` if possible. Otherwise it becomes a noop. * implements `fs.lutimes` if possible. Otherwise it becomes a noop.
* ignores `EINVAL` and `EPERM` errors in `chown`, `fchown` or * ignores `EINVAL` and `EPERM` errors in `chown`, `fchown` or
...@@ -26,8 +23,111 @@ On Windows, it retries renaming a file for up to one second if `EACCESS` ...@@ -26,8 +23,111 @@ On Windows, it retries renaming a file for up to one second if `EACCESS`
or `EPERM` error occurs, likely because antivirus software has locked or `EPERM` error occurs, likely because antivirus software has locked
the directory. the directory.
## Configuration ## USAGE
```javascript
// use just like fs
var fs = require('graceful-fs')
// now go and do stuff with it...
fs.readFileSync('some-file-or-whatever')
```
## Global Patching
If you want to patch the global fs module (or any other fs-like
module) you can do this:
```javascript
// Make sure to read the caveat below.
var realFs = require('fs')
var gracefulFs = require('graceful-fs')
gracefulFs.gracefulify(realFs)
```
This should only ever be done at the top-level application layer, in
order to delay on EMFILE errors from any fs-using dependencies. You
should **not** do this in a library, because it can cause unexpected
delays in other parts of the program.
## Changes
This module is fairly stable at this point, and used by a lot of
things. That being said, because it implements a subtle behavior
change in a core part of the node API, even modest changes can be
extremely breaking, and the versioning is thus biased towards
bumping the major when in doubt.
The main change between major versions has been switching between
providing a fully-patched `fs` module vs monkey-patching the node core
builtin, and the approach by which a non-monkey-patched `fs` was
created.
The goal is to trade `EMFILE` errors for slower fs operations. So, if
you try to open a zillion files, rather than crashing, `open`
operations will be queued up and wait for something else to `close`.
There are advantages to each approach. Monkey-patching the fs means
that no `EMFILE` errors can possibly occur anywhere in your
application, because everything is using the same core `fs` module,
which is patched. However, it can also obviously cause undesirable
side-effects, especially if the module is loaded multiple times.
Implementing a separate-but-identical patched `fs` module is more
surgical (and doesn't run the risk of patching multiple times), but
also imposes the challenge of keeping in sync with the core module.
The current approach loads the `fs` module, and then creates a
lookalike object that has all the same methods, except a few that are
patched. It is safe to use in all versions of Node from 0.8 through
7.0.
### v4
* Do not monkey-patch the fs module. This module may now be used as a
drop-in dep, and users can opt into monkey-patching the fs builtin
if their app requires it.
### v3
* Monkey-patch fs, because the eval approach no longer works on recent
node.
* fixed possible type-error throw if rename fails on windows
* verify that we *never* get EMFILE errors
* Ignore ENOSYS from chmod/chown
* clarify that graceful-fs must be used as a drop-in
### v2.1.0
* Use eval rather than monkey-patching fs.
* readdir: Always sort the results
* win32: requeue a file if error has an OK status
### v2.0
* A return to monkey patching
* wrap process.cwd
### v1.1
* wrap readFile
* Wrap fs.writeFile.
* readdir protection
* Don't clobber the fs builtin
* Handle fs.read EAGAIN errors by trying again
* Expose the curOpen counter
* No-op lchown/lchmod if not implemented
* fs.rename patch only for win32
* Patch fs.rename to handle AV software on Windows
* Close #4 Chown should not fail on einval or eperm if non-root
* Fix isaacs/fstream#1 Only wrap fs one time
* Fix #3 Start at 1024 max files, then back off on EMFILE
* lutimes that doens't blow up on Linux
* A full on-rewrite using a queue instead of just swallowing the EMFILE error
* Wrap Read/Write streams as well
### 1.0
The maximum number of open file descriptors that graceful-fs manages may * Update engines for node 0.6
be adjusted by setting `fs.MAX_OPEN` to a different number. The default * Be lstat-graceful on Windows
is 1024. * first
// this keeps a queue of opened file descriptors, and will make var fs = require('fs')
// fs operations wait until some have closed before trying to open more. var polyfills = require('./polyfills.js')
var legacy = require('./legacy-streams.js')
var fs = exports = module.exports = {} var clone = require('./clone.js')
fs._originalFs = require("fs")
var util = require('util')
Object.getOwnPropertyNames(fs._originalFs).forEach(function(prop) {
var desc = Object.getOwnPropertyDescriptor(fs._originalFs, prop) /* istanbul ignore next - node 0.x polyfill */
Object.defineProperty(fs, prop, desc) var gracefulQueue
}) var previousSymbol
var queue = [] /* istanbul ignore else - node 0.x polyfill */
, constants = require("constants") if (typeof Symbol === 'function' && typeof Symbol.for === 'function') {
gracefulQueue = Symbol.for('graceful-fs.queue')
fs._curOpen = 0 // This is used in testing by future versions
previousSymbol = Symbol.for('graceful-fs.previous')
fs.MIN_MAX_OPEN = 64 } else {
fs.MAX_OPEN = 1024 gracefulQueue = '___graceful-fs.queue'
previousSymbol = '___graceful-fs.previous'
// prevent EMFILE errors
function OpenReq (path, flags, mode, cb) {
this.path = path
this.flags = flags
this.mode = mode
this.cb = cb
} }
function noop () {} function noop () {}
fs.open = gracefulOpen var debug = noop
if (util.debuglog)
function gracefulOpen (path, flags, mode, cb) { debug = util.debuglog('gfs4')
if (typeof mode === "function") cb = mode, mode = null else if (/\bgfs4\b/i.test(process.env.NODE_DEBUG || ''))
if (typeof cb !== "function") cb = noop debug = function() {
var m = util.format.apply(util, arguments)
if (fs._curOpen >= fs.MAX_OPEN) { m = 'GFS4: ' + m.split(/\n/).join('\nGFS4: ')
queue.push(new OpenReq(path, flags, mode, cb)) console.error(m)
setTimeout(flush)
return
}
open(path, flags, mode, function (er, fd) {
if (er && er.code === "EMFILE" && fs._curOpen > fs.MIN_MAX_OPEN) {
// that was too many. reduce max, get back in queue.
// this should only happen once in a great while, and only
// if the ulimit -n is set lower than 1024.
fs.MAX_OPEN = fs._curOpen - 1
return fs.open(path, flags, mode, cb)
} }
cb(er, fd)
})
}
function open (path, flags, mode, cb) { // Once time initialization
cb = cb || noop if (!global[gracefulQueue]) {
fs._curOpen ++ // This queue can be shared by multiple loaded instances
fs._originalFs.open.call(fs, path, flags, mode, function (er, fd) { var queue = []
if (er) onclose() Object.defineProperty(global, gracefulQueue, {
cb(er, fd) get: function() {
return queue
}
}) })
}
fs.openSync = function (path, flags, mode) { // Patch fs.close/closeSync to shared queue version, because we need
var ret // to retry() whenever a close happens *anywhere* in the program.
ret = fs._originalFs.openSync.call(fs, path, flags, mode) // This is essential when multiple graceful-fs instances are
fs._curOpen ++ // in play at the same time.
return ret fs.close = (function (fs$close) {
} function close (fd, cb) {
return fs$close.call(fs, fd, function (err) {
function onclose () { // This function uses the graceful-fs shared queue
fs._curOpen -- if (!err) {
flush() retry()
}
function flush () {
while (fs._curOpen < fs.MAX_OPEN) {
var req = queue.shift()
if (!req) return
switch (req.constructor.name) {
case 'OpenReq':
open(req.path, req.flags || "r", req.mode || 0777, req.cb)
break
case 'ReaddirReq':
readdir(req.path, req.cb)
break
case 'ReadFileReq':
readFile(req.path, req.options, req.cb)
break
case 'WriteFileReq':
writeFile(req.path, req.data, req.options, req.cb)
break
default:
throw new Error('Unknown req type: ' + req.constructor.name)
}
} }
}
fs.close = function (fd, cb) { if (typeof cb === 'function')
cb = cb || noop cb.apply(this, arguments)
fs._originalFs.close.call(fs, fd, function (er) {
onclose()
cb(er)
}) })
}
fs.closeSync = function (fd) {
try {
return fs._originalFs.closeSync.call(fs, fd)
} finally {
onclose()
} }
}
Object.defineProperty(close, previousSymbol, {
value: fs$close
})
return close
})(fs.close)
// readdir takes a fd as well. fs.closeSync = (function (fs$closeSync) {
// however, the sync version closes it right away, so function closeSync (fd) {
// there's no need to wrap. // This function uses the graceful-fs shared queue
// It would be nice to catch when it throws an EMFILE, fs$closeSync.apply(fs, arguments)
// but that's relatively rare anyway. retry()
fs.readdir = gracefulReaddir
function gracefulReaddir (path, cb) {
if (fs._curOpen >= fs.MAX_OPEN) {
queue.push(new ReaddirReq(path, cb))
setTimeout(flush)
return
} }
readdir(path, function (er, files) { Object.defineProperty(closeSync, previousSymbol, {
if (er && er.code === "EMFILE" && fs._curOpen > fs.MIN_MAX_OPEN) { value: fs$closeSync
fs.MAX_OPEN = fs._curOpen - 1
return fs.readdir(path, cb)
}
cb(er, files)
}) })
} return closeSync
})(fs.closeSync)
function readdir (path, cb) { if (/\bgfs4\b/i.test(process.env.NODE_DEBUG || '')) {
cb = cb || noop process.on('exit', function() {
fs._curOpen ++ debug(global[gracefulQueue])
fs._originalFs.readdir.call(fs, path, function (er, files) { require('assert').equal(global[gracefulQueue].length, 0)
onclose()
cb(er, files)
}) })
}
} }
function ReaddirReq (path, cb) { module.exports = patch(clone(fs))
this.path = path if (process.env.TEST_GRACEFUL_FS_GLOBAL_PATCH && !fs.__patched) {
this.cb = cb module.exports = patch(fs)
fs.__patched = true;
} }
function patch (fs) {
fs.readFile = gracefulReadFile // Everything that references the open() function needs to be in here
polyfills(fs)
function gracefulReadFile(path, options, cb) { fs.gracefulify = patch
if (typeof options === "function") cb = options, options = null
if (typeof cb !== "function") cb = noop fs.createReadStream = createReadStream
fs.createWriteStream = createWriteStream
if (fs._curOpen >= fs.MAX_OPEN) { var fs$readFile = fs.readFile
queue.push(new ReadFileReq(path, options, cb)) fs.readFile = readFile
setTimeout(flush) function readFile (path, options, cb) {
return if (typeof options === 'function')
cb = options, options = null
return go$readFile(path, options, cb)
function go$readFile (path, options, cb) {
return fs$readFile(path, options, function (err) {
if (err && (err.code === 'EMFILE' || err.code === 'ENFILE'))
enqueue([go$readFile, [path, options, cb]])
else {
if (typeof cb === 'function')
cb.apply(this, arguments)
retry()
} }
readFile(path, options, function (er, data) {
if (er && er.code === "EMFILE" && fs._curOpen > fs.MIN_MAX_OPEN) {
fs.MAX_OPEN = fs._curOpen - 1
return fs.readFile(path, options, cb)
}
cb(er, data)
})
}
function readFile (path, options, cb) {
cb = cb || noop
fs._curOpen ++
fs._originalFs.readFile.call(fs, path, options, function (er, data) {
onclose()
cb(er, data)
}) })
} }
}
function ReadFileReq (path, options, cb) {
this.path = path
this.options = options
this.cb = cb
}
var fs$writeFile = fs.writeFile
fs.writeFile = writeFile
function writeFile (path, data, options, cb) {
if (typeof options === 'function')
cb = options, options = null
return go$writeFile(path, data, options, cb)
fs.writeFile = gracefulWriteFile function go$writeFile (path, data, options, cb) {
return fs$writeFile(path, data, options, function (err) {
if (err && (err.code === 'EMFILE' || err.code === 'ENFILE'))
enqueue([go$writeFile, [path, data, options, cb]])
else {
if (typeof cb === 'function')
cb.apply(this, arguments)
retry()
}
})
}
}
function gracefulWriteFile(path, data, options, cb) { var fs$appendFile = fs.appendFile
if (typeof options === "function") cb = options, options = null if (fs$appendFile)
if (typeof cb !== "function") cb = noop fs.appendFile = appendFile
function appendFile (path, data, options, cb) {
if (typeof options === 'function')
cb = options, options = null
if (fs._curOpen >= fs.MAX_OPEN) { return go$appendFile(path, data, options, cb)
queue.push(new WriteFileReq(path, data, options, cb))
setTimeout(flush)
return
}
writeFile(path, data, options, function (er) { function go$appendFile (path, data, options, cb) {
if (er && er.code === "EMFILE" && fs._curOpen > fs.MIN_MAX_OPEN) { return fs$appendFile(path, data, options, function (err) {
fs.MAX_OPEN = fs._curOpen - 1 if (err && (err.code === 'EMFILE' || err.code === 'ENFILE'))
return fs.writeFile(path, data, options, cb) enqueue([go$appendFile, [path, data, options, cb]])
else {
if (typeof cb === 'function')
cb.apply(this, arguments)
retry()
} }
cb(er)
})
}
function writeFile (path, data, options, cb) {
cb = cb || noop
fs._curOpen ++
fs._originalFs.writeFile.call(fs, path, data, options, function (er) {
onclose()
cb(er)
}) })
} }
}
function WriteFileReq (path, data, options, cb) { var fs$readdir = fs.readdir
this.path = path fs.readdir = readdir
this.data = data function readdir (path, options, cb) {
this.options = options var args = [path]
this.cb = cb if (typeof options !== 'function') {
} args.push(options)
} else {
cb = options
}
args.push(go$readdir$cb)
return go$readdir(args)
// (re-)implement some things that are known busted or missing. function go$readdir$cb (err, files) {
if (files && files.sort)
files.sort()
var constants = require("constants") if (err && (err.code === 'EMFILE' || err.code === 'ENFILE'))
enqueue([go$readdir, [args]])
// lchmod, broken prior to 0.6.2 else {
// back-port the fix here. if (typeof cb === 'function')
if (constants.hasOwnProperty('O_SYMLINK') && cb.apply(this, arguments)
process.version.match(/^v0\.6\.[0-2]|^v0\.5\./)) { retry()
fs.lchmod = function (path, mode, callback) { }
callback = callback || noop }
fs.open( path
, constants.O_WRONLY | constants.O_SYMLINK
, mode
, function (err, fd) {
if (err) {
callback(err)
return
}
// prefer to return the chmod error, if one occurs,
// but still try to close, and report closing errors if they occur.
fs.fchmod(fd, mode, function (err) {
fs.close(fd, function(err2) {
callback(err || err2)
})
})
})
} }
fs.lchmodSync = function (path, mode) { function go$readdir (args) {
var fd = fs.openSync(path, constants.O_WRONLY | constants.O_SYMLINK, mode) return fs$readdir.apply(fs, args)
// prefer to return the chmod error, if one occurs,
// but still try to close, and report closing errors if they occur.
var err, err2
try {
var ret = fs.fchmodSync(fd, mode)
} catch (er) {
err = er
} }
try {
fs.closeSync(fd) if (process.version.substr(0, 4) === 'v0.8') {
} catch (er) { var legStreams = legacy(fs)
err2 = er ReadStream = legStreams.ReadStream
WriteStream = legStreams.WriteStream
} }
if (err || err2) throw (err || err2)
return ret var fs$ReadStream = fs.ReadStream
if (fs$ReadStream) {
ReadStream.prototype = Object.create(fs$ReadStream.prototype)
ReadStream.prototype.open = ReadStream$open
} }
}
var fs$WriteStream = fs.WriteStream
if (fs$WriteStream) {
WriteStream.prototype = Object.create(fs$WriteStream.prototype)
WriteStream.prototype.open = WriteStream$open
}
// lutimes implementation, or no-op Object.defineProperty(fs, 'ReadStream', {
if (!fs.lutimes) { get: function () {
if (constants.hasOwnProperty("O_SYMLINK")) { return ReadStream
fs.lutimes = function (path, at, mt, cb) { },
fs.open(path, constants.O_SYMLINK, function (er, fd) { set: function (val) {
cb = cb || noop ReadStream = val
if (er) return cb(er) },
fs.futimes(fd, at, mt, function (er) { enumerable: true,
fs.close(fd, function (er2) { configurable: true
return cb(er || er2) })
Object.defineProperty(fs, 'WriteStream', {
get: function () {
return WriteStream
},
set: function (val) {
WriteStream = val
},
enumerable: true,
configurable: true
}) })
// legacy names
var FileReadStream = ReadStream
Object.defineProperty(fs, 'FileReadStream', {
get: function () {
return FileReadStream
},
set: function (val) {
FileReadStream = val
},
enumerable: true,
configurable: true
}) })
var FileWriteStream = WriteStream
Object.defineProperty(fs, 'FileWriteStream', {
get: function () {
return FileWriteStream
},
set: function (val) {
FileWriteStream = val
},
enumerable: true,
configurable: true
}) })
function ReadStream (path, options) {
if (this instanceof ReadStream)
return fs$ReadStream.apply(this, arguments), this
else
return ReadStream.apply(Object.create(ReadStream.prototype), arguments)
} }
fs.lutimesSync = function (path, at, mt) { function ReadStream$open () {
var fd = fs.openSync(path, constants.O_SYMLINK) var that = this
, err open(that.path, that.flags, that.mode, function (err, fd) {
, err2 if (err) {
, ret if (that.autoClose)
that.destroy()
try { that.emit('error', err)
var ret = fs.futimesSync(fd, at, mt) } else {
} catch (er) { that.fd = fd
err = er that.emit('open', fd)
} that.read()
try {
fs.closeSync(fd)
} catch (er) {
err2 = er
}
if (err || err2) throw (err || err2)
return ret
} }
})
} else if (fs.utimensat && constants.hasOwnProperty("AT_SYMLINK_NOFOLLOW")) {
// maybe utimensat will be bound soonish?
fs.lutimes = function (path, at, mt, cb) {
fs.utimensat(path, at, mt, constants.AT_SYMLINK_NOFOLLOW, cb)
} }
fs.lutimesSync = function (path, at, mt) { function WriteStream (path, options) {
return fs.utimensatSync(path, at, mt, constants.AT_SYMLINK_NOFOLLOW) if (this instanceof WriteStream)
return fs$WriteStream.apply(this, arguments), this
else
return WriteStream.apply(Object.create(WriteStream.prototype), arguments)
} }
function WriteStream$open () {
var that = this
open(that.path, that.flags, that.mode, function (err, fd) {
if (err) {
that.destroy()
that.emit('error', err)
} else { } else {
fs.lutimes = function (_a, _b, _c, cb) { process.nextTick(cb) } that.fd = fd
fs.lutimesSync = function () {} that.emit('open', fd)
} }
}
// https://github.com/isaacs/node-graceful-fs/issues/4
// Chown should not fail on einval or eperm if non-root.
fs.chown = chownFix(fs.chown)
fs.fchown = chownFix(fs.fchown)
fs.lchown = chownFix(fs.lchown)
fs.chownSync = chownFixSync(fs.chownSync)
fs.fchownSync = chownFixSync(fs.fchownSync)
fs.lchownSync = chownFixSync(fs.lchownSync)
function chownFix (orig) {
if (!orig) return orig
return function (target, uid, gid, cb) {
return orig.call(fs, target, uid, gid, function (er, res) {
if (chownErOk(er)) er = null
cb(er, res)
}) })
} }
}
function chownFixSync (orig) { function createReadStream (path, options) {
if (!orig) return orig return new fs.ReadStream(path, options)
return function (target, uid, gid) {
try {
return orig.call(fs, target, uid, gid)
} catch (er) {
if (!chownErOk(er)) throw er
}
} }
}
function chownErOk (er) {
// if there's no getuid, or if getuid() is something other than 0,
// and the error is EINVAL or EPERM, then just ignore it.
// This specific case is a silent failure in cp, install, tar,
// and most other unix tools that manage permissions.
// When running as root, or if other types of errors are encountered,
// then it's strict.
if (!er || (!process.getuid || process.getuid() !== 0)
&& (er.code === "EINVAL" || er.code === "EPERM")) return true
}
function createWriteStream (path, options) {
// if lchmod/lchown do not exist, then make them no-ops return new fs.WriteStream(path, options)
if (!fs.lchmod) {
fs.lchmod = function (path, mode, cb) {
process.nextTick(cb)
}
fs.lchmodSync = function () {}
}
if (!fs.lchown) {
fs.lchown = function (path, uid, gid, cb) {
process.nextTick(cb)
} }
fs.lchownSync = function () {}
}
var fs$open = fs.open
fs.open = open
function open (path, flags, mode, cb) {
if (typeof mode === 'function')
cb = mode, mode = null
return go$open(path, flags, mode, cb)
// on Windows, A/V software can lock the directory, causing this function go$open (path, flags, mode, cb) {
// to fail with an EACCES or EPERM if the directory contains newly return fs$open(path, flags, mode, function (err, fd) {
// created files. Try again on failure, for up to 1 second. if (err && (err.code === 'EMFILE' || err.code === 'ENFILE'))
if (process.platform === "win32") { enqueue([go$open, [path, flags, mode, cb]])
var rename_ = fs.rename else {
fs.rename = function rename (from, to, cb) { if (typeof cb === 'function')
var start = Date.now() cb.apply(this, arguments)
rename_(from, to, function CB (er) { retry()
if (er
&& (er.code === "EACCES" || er.code === "EPERM")
&& Date.now() - start < 1000) {
return rename_(from, to, CB)
} }
cb(er)
}) })
} }
} }
return fs
}
// if read() returns EAGAIN, then just try it again. function enqueue (elem) {
var read = fs.read debug('ENQUEUE', elem[0].name, elem[1])
fs.read = function (fd, buffer, offset, length, position, callback_) { global[gracefulQueue].push(elem)
var callback
if (callback_ && typeof callback_ === 'function') {
var eagCounter = 0
callback = function (er, _, __) {
if (er && er.code === 'EAGAIN' && eagCounter < 10) {
eagCounter ++
return read.call(fs, fd, buffer, offset, length, position, callback)
}
callback_.apply(this, arguments)
}
}
return read.call(fs, fd, buffer, offset, length, position, callback)
} }
var readSync = fs.readSync function retry () {
fs.readSync = function (fd, buffer, offset, length, position) { var elem = global[gracefulQueue].shift()
var eagCounter = 0 if (elem) {
while (true) { debug('RETRY', elem[0].name, elem[1])
try { elem[0].apply(null, elem[1])
return readSync.call(fs, fd, buffer, offset, length, position)
} catch (er) {
if (er.code === 'EAGAIN' && eagCounter < 10) {
eagCounter ++
continue
}
throw er
}
} }
} }
{ {
"_from": "graceful-fs@~1.2.0", "_development": true,
"_id": "graceful-fs@1.2.3", "_from": "graceful-fs@4.2.3",
"_id": "graceful-fs@4.2.3",
"_inBundle": false, "_inBundle": false,
"_integrity": "sha1-FaSAaldUfLLS2/J/QuiajDRRs2Q=", "_integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ==",
"_location": "/globule/graceful-fs", "_location": "/globule/graceful-fs",
"_phantomChildren": {}, "_phantomChildren": {},
"_requested": { "_requested": {
"type": "range", "type": "version",
"registry": true, "registry": true,
"raw": "graceful-fs@~1.2.0", "raw": "graceful-fs@4.2.3",
"name": "graceful-fs", "name": "graceful-fs",
"escapedName": "graceful-fs", "escapedName": "graceful-fs",
"rawSpec": "~1.2.0", "rawSpec": "4.2.3",
"saveSpec": null, "saveSpec": null,
"fetchSpec": "~1.2.0" "fetchSpec": "4.2.3"
},
"_requiredBy": [
"/globule/glob"
],
"_resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-1.2.3.tgz",
"_shasum": "15a4806a57547cb2d2dbf27f42e89a8c3451b364",
"_spec": "graceful-fs@~1.2.0",
"_where": "C:\\Work\\OneDrive - bwstaff\\M4_Lab\\TV3\\NewVersion01\\LAFJLBmf939XYm5gj\\dev\\node_modules\\globule\\node_modules\\glob",
"author": {
"name": "Isaac Z. Schlueter",
"email": "i@izs.me",
"url": "http://blog.izs.me"
}, },
"_requiredBy": [],
"_resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz",
"_shasum": "4a12ff1b60376ef09862c2093edd908328be8423",
"_spec": "graceful-fs@4.2.3",
"_where": "C:\\Work\\OneDrive - bwstaff\\M4_Lab\\Main\\02_Plattform_Main\\m4labplatform",
"bugs": { "bugs": {
"url": "https://github.com/isaacs/node-graceful-fs/issues" "url": "https://github.com/isaacs/node-graceful-fs/issues"
}, },
"bundleDependencies": false, "bundleDependencies": false,
"deprecated": "please upgrade to graceful-fs 4 for compatibility with current and future versions of Node.js", "dependencies": {},
"deprecated": false,
"description": "A drop-in replacement for fs, making various improvements.", "description": "A drop-in replacement for fs, making various improvements.",
"devDependencies": {
"import-fresh": "^2.0.0",
"mkdirp": "^0.5.0",
"rimraf": "^2.2.8",
"tap": "^12.7.0"
},
"directories": { "directories": {
"test": "test" "test": "test"
}, },
"engines": { "files": [
"node": ">=0.4.0" "fs.js",
}, "graceful-fs.js",
"legacy-streams.js",
"polyfills.js",
"clone.js"
],
"homepage": "https://github.com/isaacs/node-graceful-fs#readme", "homepage": "https://github.com/isaacs/node-graceful-fs#readme",
"keywords": [ "keywords": [
"fs", "fs",
...@@ -56,15 +61,18 @@ ...@@ -56,15 +61,18 @@
"EPERM", "EPERM",
"EACCESS" "EACCESS"
], ],
"license": "BSD", "license": "ISC",
"main": "graceful-fs.js", "main": "graceful-fs.js",
"name": "graceful-fs", "name": "graceful-fs",
"repository": { "repository": {
"type": "git", "type": "git",
"url": "git://github.com/isaacs/node-graceful-fs.git" "url": "git+https://github.com/isaacs/node-graceful-fs.git"
}, },
"scripts": { "scripts": {
"test": "tap test/*.js" "postpublish": "git push origin --follow-tags",
"postversion": "npm publish",
"preversion": "npm test",
"test": "node test.js | tap -"
}, },
"version": "1.2.3" "version": "4.2.3"
} }
var test = require('tap').test
var fs = require('../graceful-fs.js')
test('graceful fs is not fs', function (t) {
  // graceful-fs must hand back its own patched lookalike object,
  // never the fs builtin itself.
  var realFs = require('fs')
  t.notEqual(fs, realFs)
  t.end()
})
test('open an existing file works', function (t) {
  // Both the sync and async open paths must bump the internal
  // _curOpen counter by one, and close must bring it back down.
  // NOTE(review): relies on the legacy `_curOpen` counter being
  // exposed by this graceful-fs build — confirm against graceful-fs.js.
  var before = fs._curOpen

  var syncFd = fs.openSync(__filename, 'r')
  t.equal(fs._curOpen, before + 1)
  fs.closeSync(syncFd)
  t.equal(fs._curOpen, before)

  fs.open(__filename, 'r', function (err, fd) {
    if (err) throw err
    t.equal(fs._curOpen, before + 1)
    fs.close(fd, function (closeErr) {
      if (closeErr) throw closeErr
      t.equal(fs._curOpen, before)
      t.end()
    })
  })
})
test('open a non-existing file throws', function (t) {
  // ENOENT must propagate from both openSync and open, and the
  // open-descriptor counter must be left unchanged in both cases.
  var start = fs._curOpen
  var fd
  var err

  try {
    fd = fs.openSync('this file does not exist', 'r')
  } catch (x) {
    err = x
  }
  t.ok(err, 'should throw')
  t.notOk(fd, 'should not get an fd')
  t.equal(err.code, 'ENOENT')
  t.equal(fs._curOpen, start)

  fs.open('neither does this file', 'r', function (asyncErr, asyncFd) {
    t.ok(asyncErr, 'should throw')
    t.notOk(asyncFd, 'should not get an fd')
    t.equal(asyncErr.code, 'ENOENT')
    t.equal(fs._curOpen, start)
    t.end()
  })
})
var test = require('tap').test
// simulated ulimit
// this is like graceful-fs, but in reverse
var fs_ = require('fs')
var fs = require('../graceful-fs.js')
// Do one real readdir through graceful-fs before the mocks below take
// over; `files` itself is not used again — presumably this just primes
// the module (TODO confirm).
var files = fs.readdirSync(__dirname)

// Ok, no more actual file reading!
var fds = 0      // descriptors currently "open" in the simulated kernel
var nextFd = 60  // next fake fd value handed out by the mocked open()
var limit = 8    // simulated ulimit: reaching this makes opens fail with EMFILE
// Mocked async open: hands out sequential fake fds until the simulated
// ulimit is reached, then fails with EMFILE (asynchronously, like Node).
fs_.open = function (path, flags, mode, cb) {
  process.nextTick(function () {
    ++fds
    if (fds >= limit) {
      // Over the limit: undo the bump and report EMFILE.
      --fds
      var err = new Error('EMFILE Curses!')
      err.code = 'EMFILE'
      err.path = path
      return cb(err)
    }
    cb(null, nextFd++)
  })
}
// Mocked sync open: same EMFILE simulation as the async variant, but
// thrown instead of passed to a callback.
fs_.openSync = function (path, flags, mode) {
  if (fds >= limit) {
    var err = new Error('EMFILE Curses!')
    err.code = 'EMFILE'
    err.path = path
    throw err
  }
  ++fds
  return nextFd++
}
// Mocked async close: releases one simulated descriptor on next tick.
fs_.close = function (fd, cb) {
  process.nextTick(function () {
    fds--
    cb()
  })
}
// Mocked sync close: releases one simulated descriptor immediately.
fs_.closeSync = function (fd) {
  fds--
}
// Mocked async readdir: consumes a descriptor for one tick and returns
// a fixed listing, or fails with EMFILE when the simulated ulimit is hit.
fs_.readdir = function (path, cb) {
  process.nextTick(function () {
    if (fds >= limit) {
      var err = new Error('EMFILE Curses!')
      err.code = 'EMFILE'
      err.path = path
      cb(err)
    } else {
      ++fds
      process.nextTick(function () {
        // The descriptor is held for exactly one tick.
        --fds
        cb(null, [__filename, "some-other-file.js"])
      })
    }
  })
}
// Mocked sync readdir: fixed listing, or an EMFILE throw at the limit.
fs_.readdirSync = function (path) {
  if (fds >= limit) {
    var err = new Error('EMFILE Curses!')
    err.code = 'EMFILE'
    err.path = path
    throw err
  }
  return [__filename, "some-other-file.js"]
}
// When the mocked kernel starts returning EMFILE, graceful-fs should
// automatically reduce MAX_OPEN (but never below MIN_MAX_OPEN) and
// still complete every queued open.
// Fix: removed the unused local `phase` that was declared but never read.
test('open emfile autoreduce', function (t) {
  fs.MIN_MAX_OPEN = 4
  t.equal(fs.MAX_OPEN, 1024)

  var max = 12
  for (var i = 0; i < max; i++) {
    fs.open(__filename, 'r', next(i))
  }

  // Each row: [i, fd, er, MAX_OPEN, MIN_MAX_OPEN, _curOpen, fds].
  // Rows 0-6 run before the simulated ulimit bites; after the first
  // EMFILE, MAX_OPEN is expected to back off from 1024 to 6.
  var expect =
    [ [ 0, 60, null, 1024, 4, 12, 1 ],
      [ 1, 61, null, 1024, 4, 12, 2 ],
      [ 2, 62, null, 1024, 4, 12, 3 ],
      [ 3, 63, null, 1024, 4, 12, 4 ],
      [ 4, 64, null, 1024, 4, 12, 5 ],
      [ 5, 65, null, 1024, 4, 12, 6 ],
      [ 6, 66, null, 1024, 4, 12, 7 ],
      [ 7, 67, null, 6, 4, 5, 1 ],
      [ 8, 68, null, 6, 4, 5, 2 ],
      [ 9, 69, null, 6, 4, 5, 3 ],
      [ 10, 70, null, 6, 4, 5, 4 ],
      [ 11, 71, null, 6, 4, 5, 5 ] ]

  var actual = []

  // Callback factory: records the observed state for open #i, and on
  // the last one compares the whole trace against `expect`.
  function next (i) { return function (er, fd) {
    if (er)
      throw er
    actual.push([i, fd, er, fs.MAX_OPEN, fs.MIN_MAX_OPEN, fs._curOpen, fds])
    if (i === max - 1) {
      t.same(actual, expect)
      t.ok(fs.MAX_OPEN < limit)
      t.end()
    }
    fs.close(fd)
  } }
})
// Same EMFILE auto-reduce behavior as the previous test, exercised
// through readdir() instead of open().
test('readdir emfile autoreduce', function (t) {
  // Reset MAX_OPEN, since the previous test shrank it.
  fs.MAX_OPEN = 1024

  var max = 12
  for (var i = 0; i < max; i ++) {
    fs.readdir(__dirname, next(i))
  }

  // Each row: [i, files, er, MAX_OPEN, MIN_MAX_OPEN, _curOpen, fds].
  // MAX_OPEN is expected to settle at 7 after the first EMFILE.
  var expect =
    [ [0,[__filename,"some-other-file.js"],null,7,4,7,7],
      [1,[__filename,"some-other-file.js"],null,7,4,7,6],
      [2,[__filename,"some-other-file.js"],null,7,4,7,5],
      [3,[__filename,"some-other-file.js"],null,7,4,7,4],
      [4,[__filename,"some-other-file.js"],null,7,4,7,3],
      [5,[__filename,"some-other-file.js"],null,7,4,6,2],
      [6,[__filename,"some-other-file.js"],null,7,4,5,1],
      [7,[__filename,"some-other-file.js"],null,7,4,4,0],
      [8,[__filename,"some-other-file.js"],null,7,4,3,3],
      [9,[__filename,"some-other-file.js"],null,7,4,2,2],
      [10,[__filename,"some-other-file.js"],null,7,4,1,1],
      [11,[__filename,"some-other-file.js"],null,7,4,0,0] ]

  var actual = []

  // Callback factory: records observed state for readdir #i and, on the
  // last callback, compares the full trace against `expect`.
  function next (i) { return function (er, files) {
    if (er)
      throw er
    var line = [i, files, er, fs.MAX_OPEN, fs.MIN_MAX_OPEN, fs._curOpen, fds ]
    actual.push(line)
    if (i === max - 1) {
      t.ok(fs.MAX_OPEN < limit)
      t.same(actual, expect)
      t.end()
    }
  } }
})
The ISC License
Copyright (c) Isaac Z. Schlueter, Ben Noordhuis, and Contributors
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# graceful-fs
graceful-fs functions as a drop-in replacement for the fs module,
making various improvements.
The improvements are meant to normalize behavior across different
platforms and environments, and to make filesystem access more
resilient to errors.
## Improvements over [fs module](https://nodejs.org/api/fs.html)
* Queues up `open` and `readdir` calls, and retries them once
something closes if there is an EMFILE error from too many file
descriptors.
* fixes `lchmod` for Node versions prior to 0.6.2.
* implements `fs.lutimes` if possible. Otherwise it becomes a noop.
* ignores `EINVAL` and `EPERM` errors in `chown`, `fchown` or
`lchown` if the user isn't root.
* makes `lchmod` and `lchown` become noops, if not available.
* retries reading a file if `read` results in EAGAIN error.
On Windows, it retries renaming a file for up to one second if `EACCES`
or `EPERM` error occurs, likely because antivirus software has locked
the directory.
## USAGE
```javascript
// use just like fs
var fs = require('graceful-fs')
// now go and do stuff with it...
fs.readFileSync('some-file-or-whatever')
```
## Global Patching
If you want to patch the global fs module (or any other fs-like
module) you can do this:
```javascript
// Make sure to read the caveat below.
var realFs = require('fs')
var gracefulFs = require('graceful-fs')
gracefulFs.gracefulify(realFs)
```
This should only ever be done at the top-level application layer, in
order to delay on EMFILE errors from any fs-using dependencies. You
should **not** do this in a library, because it can cause unexpected
delays in other parts of the program.
## Changes
This module is fairly stable at this point, and used by a lot of
things. That being said, because it implements a subtle behavior
change in a core part of the node API, even modest changes can be
extremely breaking, and the versioning is thus biased towards
bumping the major when in doubt.
The main change between major versions has been switching between
providing a fully-patched `fs` module vs monkey-patching the node core
builtin, and the approach by which a non-monkey-patched `fs` was
created.
The goal is to trade `EMFILE` errors for slower fs operations. So, if
you try to open a zillion files, rather than crashing, `open`
operations will be queued up and wait for something else to `close`.
There are advantages to each approach. Monkey-patching the fs means
that no `EMFILE` errors can possibly occur anywhere in your
application, because everything is using the same core `fs` module,
which is patched. However, it can also obviously cause undesirable
side-effects, especially if the module is loaded multiple times.
Implementing a separate-but-identical patched `fs` module is more
surgical (and doesn't run the risk of patching multiple times), but
also imposes the challenge of keeping in sync with the core module.
The current approach loads the `fs` module, and then creates a
lookalike object that has all the same methods, except a few that are
patched. It is safe to use in all versions of Node from 0.8 through
7.0.
### v4
* Do not monkey-patch the fs module. This module may now be used as a
drop-in dep, and users can opt into monkey-patching the fs builtin
if their app requires it.
### v3
* Monkey-patch fs, because the eval approach no longer works on recent
node.
* fixed possible type-error throw if rename fails on windows
* verify that we *never* get EMFILE errors
* Ignore ENOSYS from chmod/chown
* clarify that graceful-fs must be used as a drop-in
### v2.1.0
* Use eval rather than monkey-patching fs.
* readdir: Always sort the results
* win32: requeue a file if error has an OK status
### v2.0
* A return to monkey patching
* wrap process.cwd
### v1.1
* wrap readFile
* Wrap fs.writeFile.
* readdir protection
* Don't clobber the fs builtin
* Handle fs.read EAGAIN errors by trying again
* Expose the curOpen counter
* No-op lchown/lchmod if not implemented
* fs.rename patch only for win32
* Patch fs.rename to handle AV software on Windows
* Close #4 Chown should not fail on einval or eperm if non-root
* Fix isaacs/fstream#1 Only wrap fs one time
* Fix #3 Start at 1024 max files, then back off on EMFILE
* lutimes that doesn't blow up on Linux
* A full on-rewrite using a queue instead of just swallowing the EMFILE error
* Wrap Read/Write streams as well
### 1.0
* Update engines for node 0.6
* Be lstat-graceful on Windows
* first
'use strict'
module.exports = clone
// Shallow-clone an object, preserving its prototype and the full
// property descriptors (getters/setters, non-enumerables) of every own
// property. Primitives and null pass through unchanged.
function clone (obj) {
  if (obj === null || typeof obj !== 'object') {
    return obj
  }

  // Ordinary objects keep their prototype chain; null-prototype
  // dictionaries stay prototype-free.
  var copy = obj instanceof Object
    ? { __proto__: obj.__proto__ }
    : Object.create(null)

  // Copy descriptors, not just values, so accessors survive the clone.
  Object.getOwnPropertyNames(obj).forEach(function (key) {
    Object.defineProperty(copy, key, Object.getOwnPropertyDescriptor(obj, key))
  })

  return copy
}
var fs = require('fs')
var polyfills = require('./polyfills.js')
var legacy = require('./legacy-streams.js')
var clone = require('./clone.js')
var util = require('util')
/* istanbul ignore next - node 0.x polyfill */
var gracefulQueue
var previousSymbol

/* istanbul ignore else - node 0.x polyfill */
if (typeof Symbol !== 'function' || typeof Symbol.for !== 'function') {
  // Very old runtimes without Symbol support fall back to string keys.
  gracefulQueue = '___graceful-fs.queue'
  previousSymbol = '___graceful-fs.previous'
} else {
  // Registry symbols, so every loaded copy of graceful-fs agrees on the
  // same shared-queue key.
  gracefulQueue = Symbol.for('graceful-fs.queue')
  // This one is used in testing by future versions.
  previousSymbol = Symbol.for('graceful-fs.previous')
}
// Shared do-nothing placeholder.
function noop () {}

// gfs4 debug logging: prefer util.debuglog when available, otherwise
// emulate it by checking NODE_DEBUG=gfs4 by hand (node 0.x).
var debug = noop
if (util.debuglog) {
  debug = util.debuglog('gfs4')
} else if (/\bgfs4\b/i.test(process.env.NODE_DEBUG || '')) {
  debug = function () {
    var msg = util.format.apply(util, arguments)
    msg = 'GFS4: ' + msg.split(/\n/).join('\nGFS4: ')
    console.error(msg)
  }
}
// One-time initialization: install the shared retry queue and the
// patched close/closeSync on the process global, guarded so that
// multiple loaded copies of graceful-fs all reuse the same state.
if (!global[gracefulQueue]) {
  // This queue can be shared by multiple loaded instances
  var queue = []
  // Getter-only property: the queue itself can never be replaced, only
  // pushed to / shifted from.
  Object.defineProperty(global, gracefulQueue, {
    get: function() {
      return queue
    }
  })

  // Patch fs.close/closeSync to shared queue version, because we need
  // to retry() whenever a close happens *anywhere* in the program.
  // This is essential when multiple graceful-fs instances are
  // in play at the same time.
  fs.close = (function (fs$close) {
    function close (fd, cb) {
      return fs$close.call(fs, fd, function (err) {
        // This function uses the graceful-fs shared queue
        if (!err) {
          // A descriptor was freed: give a queued operation a chance.
          retry()
        }

        if (typeof cb === 'function')
          cb.apply(this, arguments)
      })
    }

    // Keep a handle on the original close (used by tests / future versions).
    Object.defineProperty(close, previousSymbol, {
      value: fs$close
    })
    return close
  })(fs.close)

  fs.closeSync = (function (fs$closeSync) {
    function closeSync (fd) {
      // This function uses the graceful-fs shared queue
      fs$closeSync.apply(fs, arguments)
      retry()
    }

    // Keep a handle on the original closeSync as well.
    Object.defineProperty(closeSync, previousSymbol, {
      value: fs$closeSync
    })
    return closeSync
  })(fs.closeSync)

  // Under NODE_DEBUG=gfs4, dump the queue at exit and assert it drained.
  if (/\bgfs4\b/i.test(process.env.NODE_DEBUG || '')) {
    process.on('exit', function() {
      debug(global[gracefulQueue])
      require('assert').equal(global[gracefulQueue].length, 0)
    })
  }
}
// Export a patched lookalike built on a clone, so the real fs module
// object is left untouched by default.
module.exports = patch(clone(fs))

// Test hook: patch the real fs builtin in place, exactly once.
if (process.env.TEST_GRACEFUL_FS_GLOBAL_PATCH && !fs.__patched) {
  module.exports = patch(fs)
  fs.__patched = true;
}
// Apply graceful-fs behavior to an fs-like object and return it:
// open/readFile/writeFile/appendFile/readdir retry via the shared queue
// on EMFILE/ENFILE instead of failing, readdir results are sorted, and
// ReadStream/WriteStream open through the queued open().
function patch (fs) {
  // Everything that references the open() function needs to be in here
  polyfills(fs)
  fs.gracefulify = patch

  fs.createReadStream = createReadStream
  fs.createWriteStream = createWriteStream

  var fs$readFile = fs.readFile
  fs.readFile = readFile
  // Wrapped readFile: (path[, options], cb); requeues itself on
  // EMFILE/ENFILE, otherwise forwards the result and kicks the queue.
  function readFile (path, options, cb) {
    if (typeof options === 'function')
      cb = options, options = null

    return go$readFile(path, options, cb)

    function go$readFile (path, options, cb) {
      return fs$readFile(path, options, function (err) {
        if (err && (err.code === 'EMFILE' || err.code === 'ENFILE'))
          enqueue([go$readFile, [path, options, cb]])
        else {
          if (typeof cb === 'function')
            cb.apply(this, arguments)
          retry()
        }
      })
    }
  }

  var fs$writeFile = fs.writeFile
  fs.writeFile = writeFile
  // Wrapped writeFile: same EMFILE/ENFILE requeue pattern as readFile.
  function writeFile (path, data, options, cb) {
    if (typeof options === 'function')
      cb = options, options = null

    return go$writeFile(path, data, options, cb)

    function go$writeFile (path, data, options, cb) {
      return fs$writeFile(path, data, options, function (err) {
        if (err && (err.code === 'EMFILE' || err.code === 'ENFILE'))
          enqueue([go$writeFile, [path, data, options, cb]])
        else {
          if (typeof cb === 'function')
            cb.apply(this, arguments)
          retry()
        }
      })
    }
  }

  var fs$appendFile = fs.appendFile
  // appendFile does not exist on very old fs objects; only wrap it if present.
  if (fs$appendFile)
    fs.appendFile = appendFile
  function appendFile (path, data, options, cb) {
    if (typeof options === 'function')
      cb = options, options = null

    return go$appendFile(path, data, options, cb)

    function go$appendFile (path, data, options, cb) {
      return fs$appendFile(path, data, options, function (err) {
        if (err && (err.code === 'EMFILE' || err.code === 'ENFILE'))
          enqueue([go$appendFile, [path, data, options, cb]])
        else {
          if (typeof cb === 'function')
            cb.apply(this, arguments)
          retry()
        }
      })
    }
  }

  var fs$readdir = fs.readdir
  fs.readdir = readdir
  // Wrapped readdir: sorts the listing (when sortable), requeues on
  // EMFILE/ENFILE. Extra `options` arguments are forwarded positionally.
  function readdir (path, options, cb) {
    var args = [path]
    if (typeof options !== 'function') {
      args.push(options)
    } else {
      cb = options
    }
    args.push(go$readdir$cb)

    return go$readdir(args)

    function go$readdir$cb (err, files) {
      // Normalize ordering across platforms.
      if (files && files.sort)
        files.sort()

      if (err && (err.code === 'EMFILE' || err.code === 'ENFILE'))
        enqueue([go$readdir, [args]])
      else {
        if (typeof cb === 'function')
          cb.apply(this, arguments)
        retry()
      }
    }
  }

  function go$readdir (args) {
    return fs$readdir.apply(fs, args)
  }

  // node 0.8 streams are broken enough to need full replacements.
  if (process.version.substr(0, 4) === 'v0.8') {
    var legStreams = legacy(fs)
    ReadStream = legStreams.ReadStream
    WriteStream = legStreams.WriteStream
  }

  // Subclass the native streams (when present) so their open() goes
  // through the graceful, queue-aware open below.
  var fs$ReadStream = fs.ReadStream
  if (fs$ReadStream) {
    ReadStream.prototype = Object.create(fs$ReadStream.prototype)
    ReadStream.prototype.open = ReadStream$open
  }

  var fs$WriteStream = fs.WriteStream
  if (fs$WriteStream) {
    WriteStream.prototype = Object.create(fs$WriteStream.prototype)
    WriteStream.prototype.open = WriteStream$open
  }

  // Expose the patched stream classes as writable accessors so callers
  // can still swap them out, as they could with the plain fs module.
  Object.defineProperty(fs, 'ReadStream', {
    get: function () {
      return ReadStream
    },
    set: function (val) {
      ReadStream = val
    },
    enumerable: true,
    configurable: true
  })
  Object.defineProperty(fs, 'WriteStream', {
    get: function () {
      return WriteStream
    },
    set: function (val) {
      WriteStream = val
    },
    enumerable: true,
    configurable: true
  })

  // legacy names
  var FileReadStream = ReadStream
  Object.defineProperty(fs, 'FileReadStream', {
    get: function () {
      return FileReadStream
    },
    set: function (val) {
      FileReadStream = val
    },
    enumerable: true,
    configurable: true
  })
  var FileWriteStream = WriteStream
  Object.defineProperty(fs, 'FileWriteStream', {
    get: function () {
      return FileWriteStream
    },
    set: function (val) {
      FileWriteStream = val
    },
    enumerable: true,
    configurable: true
  })

  // Constructor also works when called without `new`.
  function ReadStream (path, options) {
    if (this instanceof ReadStream)
      return fs$ReadStream.apply(this, arguments), this
    else
      return ReadStream.apply(Object.create(ReadStream.prototype), arguments)
  }

  // Stream open() override: routes through the queued open(), so stream
  // creation also waits out EMFILE instead of erroring.
  function ReadStream$open () {
    var that = this
    open(that.path, that.flags, that.mode, function (err, fd) {
      if (err) {
        if (that.autoClose)
          that.destroy()

        that.emit('error', err)
      } else {
        that.fd = fd
        that.emit('open', fd)
        that.read()
      }
    })
  }

  function WriteStream (path, options) {
    if (this instanceof WriteStream)
      return fs$WriteStream.apply(this, arguments), this
    else
      return WriteStream.apply(Object.create(WriteStream.prototype), arguments)
  }

  function WriteStream$open () {
    var that = this
    open(that.path, that.flags, that.mode, function (err, fd) {
      if (err) {
        that.destroy()
        that.emit('error', err)
      } else {
        that.fd = fd
        that.emit('open', fd)
      }
    })
  }

  // Factory helpers go through the accessor properties above, so they
  // pick up any replacement stream classes a caller installs.
  function createReadStream (path, options) {
    return new fs.ReadStream(path, options)
  }

  function createWriteStream (path, options) {
    return new fs.WriteStream(path, options)
  }

  var fs$open = fs.open
  fs.open = open
  // Wrapped open: the core of graceful-fs — requeue on EMFILE/ENFILE,
  // otherwise deliver the fd and let the next queued operation run.
  function open (path, flags, mode, cb) {
    if (typeof mode === 'function')
      cb = mode, mode = null

    return go$open(path, flags, mode, cb)

    function go$open (path, flags, mode, cb) {
      return fs$open(path, flags, mode, function (err, fd) {
        if (err && (err.code === 'EMFILE' || err.code === 'ENFILE'))
          enqueue([go$open, [path, flags, mode, cb]])
        else {
          if (typeof cb === 'function')
            cb.apply(this, arguments)
          retry()
        }
      })
    }
  }

  return fs
}
// Park a failed operation on the process-global retry queue.
// elem is a [fn, args] pair; any graceful-fs copy may re-run it later.
function enqueue (elem) {
  var fn = elem[0]
  var args = elem[1]
  debug('ENQUEUE', fn.name, args)
  global[gracefulQueue].push(elem)
}
// Re-run the oldest queued operation, if any — called whenever a file
// descriptor is released anywhere in the process.
function retry () {
  var next = global[gracefulQueue].shift()
  if (!next)
    return
  debug('RETRY', next[0].name, next[1])
  next[0].apply(null, next[1])
}
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment