diff --git a/COLLABORATOR_GUIDE.md b/COLLABORATOR_GUIDE.md index 89c532e9d025f5..0cb03822c98947 100644 --- a/COLLABORATOR_GUIDE.md +++ b/COLLABORATOR_GUIDE.md @@ -416,14 +416,15 @@ longer be used. Node.js uses three Deprecation levels: -* *Documentation-Only Deprecation* refers to elements of the Public API that are - being staged for deprecation in a future Node.js major release. An explicit - notice indicating the deprecated status is added to the API documentation - but no functional changes are implemented in the code. There will be no - runtime deprecation warnings emitted for such deprecations by default. - Documentation-only deprecations may trigger a runtime warning when Node.js - is started with the [`--pending-deprecation`][] flag or the - `NODE_PENDING_DEPRECATION=1` environment variable is set. +* *Documentation-Only Deprecation* refers to elements of the Public API that + should be avoided by developers and that might be staged for a runtime + deprecation in a future Node.js major release. An explicit notice indicating + the deprecation status is added to the API documentation but no functional + changes are implemented in the code. By default there will be no deprecation + warnings emitted for such deprecations at runtime. Documentation-only + deprecations may trigger a runtime warning when Node.js is started with the + [`--pending-deprecation`][] flag or the `NODE_PENDING_DEPRECATION=1` + environment variable is set. * *Runtime Deprecation* refers to the use of process warnings emitted at runtime the first time that a deprecated API is used. A command-line @@ -831,7 +832,6 @@ LTS working group and the Release team. | Subsystem | Maintainers | | --- | --- | | `benchmark/*` | @nodejs/benchmarking, @mscdex | -| `bootstrap_node.js` | @nodejs/process | | `doc/*`, `*.md` | @nodejs/documentation | | `lib/assert` | @nodejs/testing | | `lib/async_hooks` | @nodejs/async\_hooks for bugs/reviews (+ @nodejs/diagnostics for API) | @@ -844,6 +844,7 @@ LTS working group and the Release team. | `lib/fs`, `src/{fs,file}` | @nodejs/fs | | `lib/{_}http{*}` | @nodejs/http | | `lib/inspector.js`, `src/inspector_*` | @nodejs/V8-inspector | +| `lib/internal/bootstrap/*` | @nodejs/process | | `lib/internal/url`, `src/node_url` | @nodejs/url | | `lib/net` | @bnoordhuis, @indutny, @nodejs/streams | | `lib/repl` | @nodejs/repl | @@ -851,13 +852,13 @@ LTS working group and the Release team. 
| `lib/timers` | @nodejs/timers | | `lib/util` | @nodejs/util | | `lib/zlib` | @nodejs/zlib | -| `src/async-wrap.*` | @nodejs/async\_hooks | +| `src/async_wrap.*` | @nodejs/async\_hooks | | `src/node_api.*` | @nodejs/n-api | | `src/node_crypto.*` | @nodejs/crypto | | `test/*` | @nodejs/testing | | `tools/node_modules/eslint`, `.eslintrc` | @nodejs/linting | | build | @nodejs/build | -| `src/module_wrap.*`, `lib/internal/loader/*`, `lib/internal/vm/Module.js` | @nodejs/modules | +| `src/module_wrap.*`, `lib/internal/modules/*`, `lib/internal/vm/module.js` | @nodejs/modules | | GYP | @nodejs/gyp | | performance | @nodejs/performance | | platform specific | @nodejs/platform-{aix,arm,freebsd,macos,ppc,smartos,s390,windows} | diff --git a/CPP_STYLE_GUIDE.md b/CPP_STYLE_GUIDE.md index edc5f0f12e212c..41e1f082f87751 100644 --- a/CPP_STYLE_GUIDE.md +++ b/CPP_STYLE_GUIDE.md @@ -12,6 +12,7 @@ * [CamelCase for methods, functions, and classes](#camelcase-for-methods-functions-and-classes) * [snake\_case for local variables and parameters](#snake_case-for-local-variables-and-parameters) * [snake\_case\_ for private class fields](#snake_case_-for-private-class-fields) + * [snake\_case\_ for C-like structs](#snake_case_-for-c-like-structs) * [Space after `template`](#space-after-template) * [Memory Management](#memory-management) * [Memory allocation](#memory-allocation) @@ -147,6 +148,15 @@ class Foo { }; ``` +## snake\_case\_ for C-like structs +For plain C-like structs snake_case can be used. + +```c++ +struct foo_bar { + int name; +} +``` + ## Space after `template` ```c++ diff --git a/Makefile b/Makefile index b3e65cec6030a9..02d97b633d87d6 100644 --- a/Makefile +++ b/Makefile @@ -813,8 +813,8 @@ release-only: exit 1 ; \ fi @if [ "$(DISTTYPE)" != "nightly" ] && [ "$(DISTTYPE)" != "next-nightly" ] && \ - `grep -q DEP00XX doc/api/deprecations.md`; then \ - echo 'Please update DEP00XX in doc/api/deprecations.md (See doc/releases.md)' ; \ + `grep -q DEP...X doc/api/deprecations.md`; then \ + echo 'Please update DEP...X in doc/api/deprecations.md (See doc/releases.md)' ; \ exit 1 ; \ fi @if [ "$(shell git status --porcelain | egrep -v '^\?\? ')" = "" ]; then \ diff --git a/README.md b/README.md index ef733c277619b5..df95961979edae 100644 --- a/README.md +++ b/README.md @@ -514,8 +514,6 @@ For more information about the governance of the Node.js project, see **Trivikram Kamat** <trivikr.dev@gmail.com> * [Trott](https://github.com/Trott) - **Rich Trott** <rtrott@gmail.com> (he/him) -* [tunniclm](https://github.com/tunniclm) - -**Mike Tunnicliffe** <m.j.tunnicliffe@gmail.com> * [vdeturckheim](https://github.com/vdeturckheim) - **Vladimir de Turckheim** <vlad2t@hotmail.com> (he/him) * [vkurchatkin](https://github.com/vkurchatkin) - @@ -559,6 +557,8 @@ For more information about the governance of the Node.js project, see **Alex Kocharin** <alex@kocharin.ru> * [tellnes](https://github.com/tellnes) - **Christian Tellnes** <christian@tellnes.no> +* [tunniclm](https://github.com/tunniclm) - +**Mike Tunnicliffe** <m.j.tunnicliffe@gmail.com> Collaborators follow the [COLLABORATOR_GUIDE.md](./COLLABORATOR_GUIDE.md) in maintaining the Node.js project. 
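A minimal sketch of the documentation-only deprecation behavior reworded in the COLLABORATOR_GUIDE.md hunk above: the `--pending-deprecation` flag and `NODE_PENDING_DEPRECATION=1` variable come from that text, while the script itself, its check via `process.execArgv`, and the warning message are assumptions for illustration only.

```js
'use strict';
// Documentation-only deprecations stay silent unless Node.js is started with
// --pending-deprecation or with NODE_PENDING_DEPRECATION=1 in the environment.
const pendingDeprecation =
  process.execArgv.includes('--pending-deprecation') ||
  process.env.NODE_PENDING_DEPRECATION === '1';

if (pendingDeprecation) {
  // Only then is a runtime warning surfaced for the deprecated API.
  process.emitWarning(
    'This API is deprecated in the documentation only.',
    'DeprecationWarning');
}
```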
diff --git a/benchmark/events/ee-add-remove.js b/benchmark/events/ee-add-remove.js index eee8ff4524ed1a..54e680f74ae3e1 100644 --- a/benchmark/events/ee-add-remove.js +++ b/benchmark/events/ee-add-remove.js @@ -2,7 +2,7 @@ const common = require('../common.js'); const events = require('events'); -const bench = common.createBenchmark(main, { n: [25e4] }); +const bench = common.createBenchmark(main, { n: [1e6] }); function main({ n }) { const ee = new events.EventEmitter(); diff --git a/benchmark/fs/bench-stat-promise.js b/benchmark/fs/bench-stat-promise.js index b0317455728b46..96c7058fa6218a 100644 --- a/benchmark/fs/bench-stat-promise.js +++ b/benchmark/fs/bench-stat-promise.js @@ -1,7 +1,7 @@ 'use strict'; const common = require('../common'); -const fsPromises = require('fs/promises'); +const fsPromises = require('fs').promises; const bench = common.createBenchmark(main, { n: [20e4], diff --git a/benchmark/process/next-tick-depth-args.js b/benchmark/process/next-tick-depth-args.js index 52d349c776b326..a7670d99efc354 100644 --- a/benchmark/process/next-tick-depth-args.js +++ b/benchmark/process/next-tick-depth-args.js @@ -8,13 +8,14 @@ const bench = common.createBenchmark(main, { process.maxTickDepth = Infinity; function main({ n }) { + let counter = n; function cb4(arg1, arg2, arg3, arg4) { - if (--n) { - if (n % 4 === 0) + if (--counter) { + if (counter % 4 === 0) process.nextTick(cb4, 3.14, 1024, true, false); - else if (n % 3 === 0) + else if (counter % 3 === 0) process.nextTick(cb3, 512, true, null); - else if (n % 2 === 0) + else if (counter % 2 === 0) process.nextTick(cb2, false, 5.1); else process.nextTick(cb1, 0); @@ -22,12 +23,12 @@ function main({ n }) { bench.end(n); } function cb3(arg1, arg2, arg3) { - if (--n) { - if (n % 4 === 0) + if (--counter) { + if (counter % 4 === 0) process.nextTick(cb4, 3.14, 1024, true, false); - else if (n % 3 === 0) + else if (counter % 3 === 0) process.nextTick(cb3, 512, true, null); - else if (n % 2 === 0) + else if (counter % 2 === 0) process.nextTick(cb2, false, 5.1); else process.nextTick(cb1, 0); @@ -35,12 +36,12 @@ function main({ n }) { bench.end(n); } function cb2(arg1, arg2) { - if (--n) { - if (n % 4 === 0) + if (--counter) { + if (counter % 4 === 0) process.nextTick(cb4, 3.14, 1024, true, false); - else if (n % 3 === 0) + else if (counter % 3 === 0) process.nextTick(cb3, 512, true, null); - else if (n % 2 === 0) + else if (counter % 2 === 0) process.nextTick(cb2, false, 5.1); else process.nextTick(cb1, 0); @@ -48,12 +49,12 @@ function main({ n }) { bench.end(n); } function cb1(arg1) { - if (--n) { - if (n % 4 === 0) + if (--counter) { + if (counter % 4 === 0) process.nextTick(cb4, 3.14, 1024, true, false); - else if (n % 3 === 0) + else if (counter % 3 === 0) process.nextTick(cb3, 512, true, null); - else if (n % 2 === 0) + else if (counter % 2 === 0) process.nextTick(cb2, false, 5.1); else process.nextTick(cb1, 0); diff --git a/benchmark/process/next-tick-depth.js b/benchmark/process/next-tick-depth.js index 6669936e398272..1ad32c806181b0 100644 --- a/benchmark/process/next-tick-depth.js +++ b/benchmark/process/next-tick-depth.js @@ -7,11 +7,11 @@ const bench = common.createBenchmark(main, { process.maxTickDepth = Infinity; function main({ n }) { - + let counter = n; bench.start(); process.nextTick(onNextTick); function onNextTick() { - if (--n) + if (--counter) process.nextTick(onNextTick); else bench.end(n); diff --git a/benchmark/process/next-tick-exec-args.js b/benchmark/process/next-tick-exec-args.js index 
3be86cc08e177a..f5d0fb94224148 100644 --- a/benchmark/process/next-tick-exec-args.js +++ b/benchmark/process/next-tick-exec-args.js @@ -5,8 +5,11 @@ const bench = common.createBenchmark(main, { }); function main({ n }) { + function onNextTick(i) { + if (i + 1 === n) + bench.end(n); + } - bench.start(); for (var i = 0; i < n; i++) { if (i % 4 === 0) process.nextTick(onNextTick, i, true, 10, 'test'); @@ -17,8 +20,6 @@ function main({ n }) { else process.nextTick(onNextTick, i); } - function onNextTick(i) { - if (i + 1 === n) - bench.end(n); - } + + bench.start(); } diff --git a/benchmark/process/next-tick-exec.js b/benchmark/process/next-tick-exec.js index d00ee017de4bff..936b253bfaf324 100644 --- a/benchmark/process/next-tick-exec.js +++ b/benchmark/process/next-tick-exec.js @@ -5,13 +5,14 @@ const bench = common.createBenchmark(main, { }); function main({ n }) { - - bench.start(); - for (var i = 0; i < n; i++) { - process.nextTick(onNextTick, i); - } function onNextTick(i) { if (i + 1 === n) bench.end(n); } + + for (var i = 0; i < n; i++) { + process.nextTick(onNextTick, i); + } + + bench.start(); } diff --git a/benchmark/util/splice-one.js b/benchmark/util/splice-one.js new file mode 100644 index 00000000000000..5c2a39f6d72a11 --- /dev/null +++ b/benchmark/util/splice-one.js @@ -0,0 +1,33 @@ +'use strict'; + +const common = require('../common'); + +const bench = common.createBenchmark(main, { + n: [1e7], + pos: ['start', 'middle', 'end'], + size: [10, 100, 500], +}, { flags: ['--expose-internals'] }); + +function main({ n, pos, size }) { + const { spliceOne } = require('internal/util'); + const arr = new Array(size); + arr.fill(''); + let index; + switch (pos) { + case 'end': + index = size - 1; + break; + case 'middle': + index = Math.floor(size / 2); + break; + default: // start + index = 0; + } + + bench.start(); + for (var i = 0; i < n; i++) { + spliceOne(arr, index); + arr.push(''); + } + bench.end(n); +} diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index 68d0a359292bd7..81f014cbd4b68d 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -11,7 +11,7 @@ #define V8_MAJOR_VERSION 6 #define V8_MINOR_VERSION 6 #define V8_BUILD_NUMBER 346 -#define V8_PATCH_LEVEL 24 +#define V8_PATCH_LEVEL 27 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/src/keys.cc b/deps/v8/src/keys.cc index 4f59c2553caf0c..638c83f4270b95 100644 --- a/deps/v8/src/keys.cc +++ b/deps/v8/src/keys.cc @@ -77,7 +77,14 @@ void KeyAccumulator::AddKey(Handle key, AddKeyConversion convert) { Handle::cast(key)->AsArrayIndex(&index)) { key = isolate_->factory()->NewNumberFromUint(index); } - keys_ = OrderedHashSet::Add(keys(), key); + Handle new_set = OrderedHashSet::Add(keys(), key); + if (*new_set != *keys_) { + // The keys_ Set is converted directly to a FixedArray in GetKeys which can + // be left-trimmer. Hence the previous Set should not keep a pointer to the + // new one. 
+ keys_->set(OrderedHashTableBase::kNextTableIndex, Smi::kZero); + keys_ = new_set; + } } void KeyAccumulator::AddKeys(Handle array, diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc index dc1f690a63ac8b..915d4d9ead6b1f 100644 --- a/deps/v8/src/wasm/wasm-js.cc +++ b/deps/v8/src/wasm/wasm-js.cc @@ -330,16 +330,22 @@ MaybeLocal WebAssemblyInstantiateImpl(Isolate* isolate, i::MaybeHandle instance_object; { ScheduledErrorThrower thrower(i_isolate, "WebAssembly Instantiation"); + + // TODO(ahaas): These checks on the module should not be necessary here They + // are just a workaround for https://crbug.com/837417. + i::Handle module_obj = Utils::OpenHandle(*module); + if (!module_obj->IsWasmModuleObject()) { + thrower.TypeError("Argument 0 must be a WebAssembly.Module object"); + return {}; + } + i::MaybeHandle maybe_imports = GetValueAsImports(ffi, &thrower); if (thrower.error()) return {}; - i::Handle module_obj = - i::Handle::cast( - Utils::OpenHandle(Object::Cast(*module))); instance_object = i_isolate->wasm_engine()->SyncInstantiate( - i_isolate, &thrower, module_obj, maybe_imports, - i::MaybeHandle()); + i_isolate, &thrower, i::Handle::cast(module_obj), + maybe_imports, i::MaybeHandle()); } DCHECK_EQ(instance_object.is_null(), i_isolate->has_scheduled_exception()); @@ -347,25 +353,7 @@ MaybeLocal WebAssemblyInstantiateImpl(Isolate* isolate, return Utils::ToLocal(instance_object.ToHandleChecked()); } -// Entered as internal implementation detail of sync and async instantiate. -// args[0] *must* be a WebAssembly.Module. -void WebAssemblyInstantiateImplCallback( - const v8::FunctionCallbackInfo& args) { - DCHECK_GE(args.Length(), 1); - v8::Isolate* isolate = args.GetIsolate(); - MicrotasksScope does_not_run_microtasks(isolate, - MicrotasksScope::kDoNotRunMicrotasks); - - HandleScope scope(args.GetIsolate()); - Local module = args[0]; - Local ffi = args.Data(); - Local instance; - if (WebAssemblyInstantiateImpl(isolate, module, ffi).ToLocal(&instance)) { - args.GetReturnValue().Set(instance); - } -} - -void WebAssemblyInstantiateToPairCallback( +void WebAssemblyInstantiateCallback( const v8::FunctionCallbackInfo& args) { DCHECK_GE(args.Length(), 1); Isolate* isolate = args.GetIsolate(); @@ -454,7 +442,7 @@ void WebAssemblyInstantiateStreaming( DCHECK(!module_promise.IsEmpty()); Local data = args[1]; ASSIGN(Function, instantiate_impl, - Function::New(context, WebAssemblyInstantiateToPairCallback, data)); + Function::New(context, WebAssemblyInstantiateCallback, data)); ASSIGN(Promise, result, module_promise->Then(context, instantiate_impl)); args.GetReturnValue().Set(result); } @@ -476,10 +464,12 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo& args) { Local context = isolate->GetCurrentContext(); ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context)); - Local module_promise = resolver->GetPromise(); - args.GetReturnValue().Set(module_promise); + Local promise = resolver->GetPromise(); + args.GetReturnValue().Set(promise); Local first_arg_value = args[0]; + // If args.Length < 2, this will be undefined - see FunctionCallbackInfo. 
+ Local ffi = args[1]; i::Handle first_arg = Utils::OpenHandle(*first_arg_value); if (!first_arg->IsJSObject()) { thrower.TypeError( @@ -490,26 +480,35 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo& args) { return; } - FunctionCallback instantiator = nullptr; if (first_arg->IsWasmModuleObject()) { - module_promise = resolver->GetPromise(); - if (!resolver->Resolve(context, first_arg_value).IsJust()) return; - instantiator = WebAssemblyInstantiateImplCallback; - } else { - ASSIGN(Function, async_compile, Function::New(context, WebAssemblyCompile)); - ASSIGN(Value, async_compile_retval, - async_compile->Call(context, args.Holder(), 1, &first_arg_value)); - module_promise = Local::Cast(async_compile_retval); - instantiator = WebAssemblyInstantiateToPairCallback; + i::Handle module_obj = + i::Handle::cast(first_arg); + // If args.Length < 2, this will be undefined - see FunctionCallbackInfo. + i::MaybeHandle maybe_imports = + GetValueAsImports(ffi, &thrower); + + if (thrower.error()) { + auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify())); + CHECK_IMPLIES(!maybe.FromMaybe(false), + i_isolate->has_scheduled_exception()); + return; + } + + i_isolate->wasm_engine()->AsyncInstantiate( + i_isolate, Utils::OpenHandle(*promise), module_obj, maybe_imports); + return; } - DCHECK(!module_promise.IsEmpty()); - DCHECK_NOT_NULL(instantiator); - // If args.Length < 2, this will be undefined - see FunctionCallbackInfo. - // We'll check for that in WebAssemblyInstantiateImpl. - Local data = args[1]; + + // We did not get a WasmModuleObject as input, we first have to compile the + // input. + ASSIGN(Function, async_compile, Function::New(context, WebAssemblyCompile)); + ASSIGN(Value, async_compile_retval, + async_compile->Call(context, args.Holder(), 1, &first_arg_value)); + promise = Local::Cast(async_compile_retval); + DCHECK(!promise.IsEmpty()); ASSIGN(Function, instantiate_impl, - Function::New(context, instantiator, data)); - ASSIGN(Promise, result, module_promise->Then(context, instantiate_impl)); + Function::New(context, WebAssemblyInstantiateCallback, ffi)); + ASSIGN(Promise, result, promise->Then(context, instantiate_impl)); args.GetReturnValue().Set(result); } diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-831984.js b/deps/v8/test/mjsunit/regress/regress-crbug-831984.js new file mode 100644 index 00000000000000..c4833232c4edfd --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-crbug-831984.js @@ -0,0 +1,10 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + + +let arr = [...Array(9000)]; +for (let j = 0; j < 40; j++) { + Reflect.ownKeys(arr).shift(); + Array(64386); +} diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-836141.js b/deps/v8/test/mjsunit/regress/wasm/regress-836141.js new file mode 100644 index 00000000000000..b37dbea628de37 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/wasm/regress-836141.js @@ -0,0 +1,20 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +load('test/mjsunit/wasm/wasm-constants.js'); +load('test/mjsunit/wasm/wasm-module-builder.js'); + +const builder = new WasmModuleBuilder(); +builder.addMemory(16, 32); +builder.addFunction("test", kSig_i_v).addBody([ + kExprI32Const, 12, // i32.const 0 +]); + +let module = new WebAssembly.Module(builder.toBuffer()); +module.then = () => { + // Use setTimeout to get out of the promise chain. + setTimeout(assertUnreachable); +}; + +WebAssembly.instantiate(module); diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-837417.js b/deps/v8/test/mjsunit/regress/wasm/regress-837417.js new file mode 100644 index 00000000000000..572139fac55825 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/wasm/regress-837417.js @@ -0,0 +1,23 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +load('test/mjsunit/wasm/wasm-constants.js'); +load('test/mjsunit/wasm/wasm-module-builder.js'); + +const builder = new WasmModuleBuilder(); +builder.addMemory(16, 32); +builder.addFunction("test", kSig_i_v).addBody([ + kExprI32Const, 12, // i32.const 0 +]); + +WebAssembly.Module.prototype.then = resolve => resolve( + String.fromCharCode(null, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41)); + +// WebAssembly.instantiate should not actually throw a TypeError in this case. +// However, this is a workaround for +assertPromiseResult( + WebAssembly.instantiate(builder.toBuffer()), assertUnreachable, + exception => { + assertInstanceof(exception, TypeError); + }); diff --git a/doc/api/addons.md b/doc/api/addons.md index 46bc1e7522c505..a207a71b717604 100644 --- a/doc/api/addons.md +++ b/doc/api/addons.md @@ -9,7 +9,7 @@ just as if they were an ordinary Node.js module. They are used primarily to provide an interface between JavaScript running in Node.js and C/C++ libraries. At the moment, the method for implementing Addons is rather complicated, -involving knowledge of several components and APIs : +involving knowledge of several components and APIs: - V8: the C++ library Node.js currently uses to provide the JavaScript implementation. V8 provides the mechanisms for creating objects, @@ -93,7 +93,7 @@ There is no semi-colon after `NODE_MODULE` as it's not a function (see `node.h`). The `module_name` must match the filename of the final binary (excluding -the .node suffix). +the `.node` suffix). In the `hello.cc` example, then, the initialization function is `init` and the Addon module name is `addon`. @@ -1085,9 +1085,9 @@ console.log(result); ### AtExit hooks -An "AtExit" hook is a function that is invoked after the Node.js event loop +An `AtExit` hook is a function that is invoked after the Node.js event loop has ended but before the JavaScript VM is terminated and Node.js shuts down. -"AtExit" hooks are registered using the `node::AtExit` API. +`AtExit` hooks are registered using the `node::AtExit` API. #### void AtExit(callback, args) @@ -1099,12 +1099,12 @@ has ended but before the JavaScript VM is terminated and Node.js shuts down. Registers exit hooks that run after the event loop has ended but before the VM is killed. -AtExit takes two parameters: a pointer to a callback function to run at exit, +`AtExit` takes two parameters: a pointer to a callback function to run at exit, and a pointer to untyped context data to be passed to that callback. Callbacks are run in last-in first-out order. 
-The following `addon.cc` implements AtExit: +The following `addon.cc` implements `AtExit`: ```cpp // addon.cc diff --git a/doc/api/assert.md b/doc/api/assert.md index 468293b208a90d..43e5800ff031b8 100644 --- a/doc/api/assert.md +++ b/doc/api/assert.md @@ -165,10 +165,10 @@ added: v0.1.21 changes: - version: v9.0.0 pr-url: https://github.com/nodejs/node/pull/15001 - description: Error names and messages are now properly compared + description: The `Error` names and messages are now properly compared - version: v8.0.0 pr-url: https://github.com/nodejs/node/pull/12142 - description: Set and Map content is also compared + description: The `Set` and `Map` content is also compared - version: v6.4.0, v4.7.1 pr-url: https://github.com/nodejs/node/pull/8002 description: Typed array slices are handled correctly now. @@ -208,7 +208,7 @@ the [`RegExp`][] object are not enumerable: assert.deepEqual(/a/gi, new Date()); ``` -An exception is made for [`Map`][] and [`Set`][]. Maps and Sets have their +An exception is made for [`Map`][] and [`Set`][]. `Map`s and `Set`s have their contained items compared too, as expected. "Deep" equality means that the enumerable "own" properties of child objects @@ -264,15 +264,15 @@ changes: description: Enumerable symbol properties are now compared. - version: v9.0.0 pr-url: https://github.com/nodejs/node/pull/15036 - description: NaN is now compared using the + description: The `NaN` is now compared using the [SameValueZero](https://tc39.github.io/ecma262/#sec-samevaluezero) comparison. - version: v8.5.0 pr-url: https://github.com/nodejs/node/pull/15001 - description: Error names and messages are now properly compared + description: The `Error` names and messages are now properly compared - version: v8.0.0 pr-url: https://github.com/nodejs/node/pull/12142 - description: Set and Map content is also compared + description: The `Set` and `Map` content is also compared - version: v6.4.0, v4.7.1 pr-url: https://github.com/nodejs/node/pull/8002 description: Typed array slices are handled correctly now. @@ -303,8 +303,8 @@ are recursively evaluated also by the following rules. enumerable properties. * Enumerable own [`Symbol`][] properties are compared as well. * [Object wrappers][] are compared both as objects and unwrapped values. -* Object properties are compared unordered. -* Map keys and Set items are compared unordered. +* `Object` properties are compared unordered. +* `Map` keys and `Set` items are compared unordered. * Recursion stops when both sides differ or both sides encounter a circular reference. * [`WeakMap`][] and [`WeakSet`][] comparison does not rely on their values. See @@ -413,10 +413,10 @@ function and awaits the returned promise to complete. It will then check that the promise is not rejected. If `block` is a function and it throws an error synchronously, -`assert.doesNotReject()` will return a rejected Promise with that error. If the -function does not return a promise, `assert.doesNotReject()` will return a -rejected Promise with an [`ERR_INVALID_RETURN_VALUE`][] error. In both cases the -error handler is skipped. +`assert.doesNotReject()` will return a rejected `Promise` with that error. If +the function does not return a promise, `assert.doesNotReject()` will return a +rejected `Promise` with an [`ERR_INVALID_RETURN_VALUE`][] error. In both cases +the error handler is skipped. 
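A short sketch of the synchronous-throw case described above; the `TypeError` and the `/Wrong value/` pattern are illustrative, not taken from the diff.

```js
const assert = require('assert');

// A synchronously throwing block makes assert.doesNotReject() return a
// rejected Promise carrying that same error; the /Wrong value/ error handler
// below is never consulted.
assert.doesNotReject(
  () => {
    throw new TypeError('Wrong value');
  },
  /Wrong value/
).catch((err) => {
  console.error(err instanceof TypeError); // true
});
```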
Please note: Using `assert.doesNotReject()` is actually not useful because there is little benefit by catching a rejection and then rejecting it again. Instead, @@ -494,7 +494,7 @@ assert.doesNotThrow( ``` However, the following will result in an `AssertionError` with the message -'Got unwanted exception (TypeError)..': +'Got unwanted exception...': ```js @@ -519,7 +519,7 @@ assert.doesNotThrow( /Wrong value/, 'Whoops' ); -// Throws: AssertionError: Got unwanted exception (TypeError). Whoops +// Throws: AssertionError: Got unwanted exception: Whoops ``` ## assert.equal(actual, expected[, message]) @@ -656,7 +656,7 @@ changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18247 description: Instead of throwing the original error it is now wrapped into - a AssertionError that contains the full stack trace. + an `AssertionError` that contains the full stack trace. - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18247 description: Value may now only be `undefined` or `null`. Before any truthy @@ -701,10 +701,10 @@ added: v0.1.21 changes: - version: v9.0.0 pr-url: https://github.com/nodejs/node/pull/15001 - description: Error names and messages are now properly compared + description: The `Error` names and messages are now properly compared - version: v8.0.0 pr-url: https://github.com/nodejs/node/pull/12142 - description: Set and Map content is also compared + description: The `Set` and `Map` content is also compared - version: v6.4.0, v4.7.1 pr-url: https://github.com/nodejs/node/pull/8002 description: Typed array slices are handled correctly now. @@ -774,18 +774,18 @@ added: v1.2.0 changes: - version: v9.0.0 pr-url: https://github.com/nodejs/node/pull/15398 - description: -0 and +0 are not considered equal anymore. + description: The `-0` and `+0` are not considered equal anymore. - version: v9.0.0 pr-url: https://github.com/nodejs/node/pull/15036 - description: NaN is now compared using the + description: The `NaN` is now compared using the [SameValueZero](https://tc39.github.io/ecma262/#sec-samevaluezero) comparison. - version: v9.0.0 pr-url: https://github.com/nodejs/node/pull/15001 - description: Error names and messages are now properly compared + description: The `Error` names and messages are now properly compared - version: v8.0.0 pr-url: https://github.com/nodejs/node/pull/12142 - description: Set and Map content is also compared + description: The `Set` and `Map` content is also compared - version: v6.4.0, v4.7.1 pr-url: https://github.com/nodejs/node/pull/8002 description: Typed array slices are handled correctly now. @@ -893,7 +893,8 @@ added: v0.1.21 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18319 - description: assert.ok() (no arguments) will now use a predefined error msg. + description: The `assert.ok()` (no arguments) will now use a predefined + error message. --> * `value` {any} * `message` {any} @@ -907,7 +908,7 @@ parameter is `undefined`, a default error message is assigned. If the `message` parameter is an instance of an [`Error`][] then it will be thrown instead of the `AssertionError`. If no arguments are passed in at all `message` will be set to the string: -"No value argument passed to assert.ok". +``'No value argument passed to `assert.ok()`'``. 
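A minimal sketch of that no-argument case, run from a file rather than the REPL; the try/catch wrapper is illustrative.

```js
const assert = require('assert');

try {
  assert.ok(); // No value argument passed at all.
} catch (err) {
  // AssertionError: No value argument passed to `assert.ok()`
  console.error(err.message);
}
```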
Be aware that in the `repl` the error message will be different to the one thrown in a file! See below for further details. @@ -966,9 +967,9 @@ function and awaits the returned promise to complete. It will then check that the promise is rejected. If `block` is a function and it throws an error synchronously, -`assert.rejects()` will return a rejected Promise with that error. If the +`assert.rejects()` will return a rejected `Promise` with that error. If the function does not return a promise, `assert.rejects()` will return a rejected -Promise with an [`ERR_INVALID_RETURN_VALUE`][] error. In both cases the error +`Promise` with an [`ERR_INVALID_RETURN_VALUE`][] error. In both cases the error handler is skipped. Besides the async nature to await the completion behaves identically to diff --git a/doc/api/async_hooks.md b/doc/api/async_hooks.md index 601dad93e75a57..b97bc73304a4d7 100644 --- a/doc/api/async_hooks.md +++ b/doc/api/async_hooks.md @@ -17,8 +17,8 @@ const async_hooks = require('async_hooks'); An asynchronous resource represents an object with an associated callback. This callback may be called multiple times, for example, the `'connection'` event in `net.createServer()`, or just a single time like in `fs.open()`. -A resource can also be closed before the callback is called. AsyncHook does not -explicitly distinguish between these different cases but will represent them +A resource can also be closed before the callback is called. `AsyncHook` does +not explicitly distinguish between these different cases but will represent them as the abstract concept that is a resource. ## Public API @@ -188,7 +188,7 @@ const hook = async_hooks.createHook(callbacks).enable(); * Returns: {AsyncHook} A reference to `asyncHook`. Disable the callbacks for a given `AsyncHook` instance from the global pool of -AsyncHook callbacks to be executed. Once a hook has been disabled it will not +`AsyncHook` callbacks to be executed. Once a hook has been disabled it will not be called again until enabled. For API consistency `disable()` also returns the `AsyncHook` instance. @@ -299,10 +299,10 @@ and document their own resource objects. For example, such a resource object could contain the SQL query being executed. In the case of Promises, the `resource` object will have `promise` property -that refers to the Promise that is being initialized, and a `isChainedPromise` -property, set to `true` if the promise has a parent promise, and `false` -otherwise. For example, in the case of `b = a.then(handler)`, `a` is considered -a parent Promise of `b`. Here, `b` is considered a chained promise. +that refers to the `Promise` that is being initialized, and an +`isChainedPromise` property, set to `true` if the promise has a parent promise, +and `false` otherwise. For example, in the case of `b = a.then(handler)`, `a` is +considered a parent `Promise` of `b`. Here, `b` is considered a chained promise. In some cases the resource object is reused for performance reasons, it is thus not safe to use it as a key in a `WeakMap` or add properties to it. @@ -466,7 +466,7 @@ added: v8.1.0 changes: - version: v8.2.0 pr-url: https://github.com/nodejs/node/pull/13490 - description: Renamed from currentId + description: Renamed from `currentId` --> * Returns: {number} The `asyncId` of the current execution context. Useful to @@ -498,7 +498,7 @@ const server = net.createServer(function onConnection(conn) { }); ``` -Note that promise contexts may not get precise executionAsyncIds by default. 
+Note that promise contexts may not get precise `executionAsyncIds` by default. See the section on [promise execution tracking][]. #### async_hooks.triggerAsyncId() @@ -521,12 +521,12 @@ const server = net.createServer((conn) => { }); ``` -Note that promise contexts may not get valid triggerAsyncIds by default. See +Note that promise contexts may not get valid `triggerAsyncId`s by default. See the section on [promise execution tracking][]. ## Promise execution tracking -By default, promise executions are not assigned asyncIds due to the relatively +By default, promise executions are not assigned `asyncId`s due to the relatively expensive nature of the [promise introspection API][PromiseHooks] provided by V8. This means that programs using promises or `async`/`await` will not get correct execution and trigger ids for promise callback contexts by default. @@ -542,10 +542,10 @@ Promise.resolve(1729).then(() => { // eid 1 tid 0 ``` -Observe that the `then` callback claims to have executed in the context of the +Observe that the `then()` callback claims to have executed in the context of the outer scope even though there was an asynchronous hop involved. Also note that -the triggerAsyncId value is 0, which means that we are missing context about the -resource that caused (triggered) the `then` callback to be executed. +the `triggerAsyncId` value is `0`, which means that we are missing context about +the resource that caused (triggered) the `then()` callback to be executed. Installing async hooks via `async_hooks.createHook` enables promise execution tracking. Example: @@ -562,15 +562,16 @@ Promise.resolve(1729).then(() => { In this example, adding any actual hook function enabled the tracking of promises. There are two promises in the example above; the promise created by -`Promise.resolve()` and the promise returned by the call to `then`. In the -example above, the first promise got the asyncId 6 and the latter got asyncId 7. -During the execution of the `then` callback, we are executing in the context of -promise with asyncId 7. This promise was triggered by async resource 6. +`Promise.resolve()` and the promise returned by the call to `then()`. In the +example above, the first promise got the `asyncId` `6` and the latter got +`asyncId` `7`. During the execution of the `then()` callback, we are executing +in the context of promise with `asyncId` `7`. This promise was triggered by +async resource `6`. Another subtlety with promises is that `before` and `after` callbacks are run -only on chained promises. That means promises not created by `then`/`catch` will -not have the `before` and `after` callbacks fired on them. For more details see -the details of the V8 [PromiseHooks][] API. +only on chained promises. That means promises not created by `then()`/`catch()` +will not have the `before` and `after` callbacks fired on them. For more details +see the details of the V8 [PromiseHooks][] API. ## JavaScript Embedder API @@ -632,8 +633,9 @@ asyncResource.emitAfter(); async event. **Default:** `executionAsyncId()`. * `requireManualDestroy` {boolean} Disables automatic `emitDestroy` when the object is garbage collected. This usually does not need to be set (even if - `emitDestroy` is called manually), unless the resource's asyncId is retrieved - and the sensitive API's `emitDestroy` is called with it. **Default:** `false`. + `emitDestroy` is called manually), unless the resource's `asyncId` is + retrieved and the sensitive API's `emitDestroy` is called with it. + **Default:** `false`. 
Example usage: diff --git a/doc/api/buffer.md b/doc/api/buffer.md index 312b4f7c17f8a3..c219f00e4a1f48 100644 --- a/doc/api/buffer.md +++ b/doc/api/buffer.md @@ -221,7 +221,7 @@ elements, and not as a byte array of the target type. That is, `[0x1020304]` or `[0x4030201]`. It is possible to create a new `Buffer` that shares the same allocated memory as -a [`TypedArray`] instance by using the TypeArray object's `.buffer` property. +a [`TypedArray`] instance by using the `TypeArray` object's `.buffer` property. ```js const arr = new Uint16Array(2); @@ -427,7 +427,8 @@ changes: run from code outside the `node_modules` directory. - version: v8.0.0 pr-url: https://github.com/nodejs/node/pull/12141 - description: new Buffer(size) will return zero-filled memory by default. + description: The `new Buffer(size)` will return zero-filled memory by + default. - version: v7.2.1 pr-url: https://github.com/nodejs/node/pull/9529 description: Calling this constructor no longer emits a deprecation warning. @@ -980,8 +981,8 @@ console.log(buf.toString('ascii')); ### buf.buffer -The `buffer` property references the underlying `ArrayBuffer` object based on -which this Buffer object is created. +* {ArrayBuffer} The underlying `ArrayBuffer` object based on + which this `Buffer` object is created. ```js const arrayBuffer = new ArrayBuffer(16); @@ -1507,8 +1508,8 @@ added: v0.11.15 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset to - uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + to `uint32` anymore. --> * `offset` {integer} Number of bytes to skip before starting to read. Must @@ -1537,8 +1538,8 @@ added: v0.11.15 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset to - uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + to `uint32` anymore. --> * `offset` {integer} Number of bytes to skip before starting to read. Must @@ -1566,8 +1567,8 @@ added: v0.5.0 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset to - uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + to `uint32` anymore. --> * `offset` {integer} Number of bytes to skip before starting to read. Must @@ -1596,8 +1597,8 @@ added: v0.5.5 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset to - uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + to `uint32` anymore. --> * `offset` {integer} Number of bytes to skip before starting to read. Must @@ -1628,8 +1629,8 @@ added: v0.5.5 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset to - uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + to `uint32` anymore. --> * `offset` {integer} Number of bytes to skip before starting to read. 
Must @@ -1660,8 +1661,8 @@ added: v0.11.15 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset and - byteLength to uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + and `byteLength` to `uint32` anymore. --> * `offset` {integer} Number of bytes to skip before starting to read. Must @@ -1693,8 +1694,8 @@ added: v0.5.0 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset to - uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + to `uint32` anymore. --> * `offset` {integer} Number of bytes to skip before starting to read. Must @@ -1721,8 +1722,8 @@ added: v0.5.5 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset to - uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + to `uint32` anymore. --> * `offset` {integer} Number of bytes to skip before starting to read. Must @@ -1755,8 +1756,8 @@ added: v0.5.5 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset to - uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + to `uint32` anymore. --> * `offset` {integer} Number of bytes to skip before starting to read. Must @@ -1785,8 +1786,8 @@ added: v0.11.15 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset and - byteLength to uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + and `byteLength` to `uint32` anymore. --> * `offset` {integer} Number of bytes to skip before starting to read. Must @@ -2102,8 +2103,8 @@ added: v0.11.15 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset to - uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + to `uint32` anymore. --> * `value` {number} Number to be written to `buf`. @@ -2137,8 +2138,8 @@ added: v0.11.15 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset to - uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + to `uint32` anymore. --> * `value` {number} Number to be written to `buf`. @@ -2171,8 +2172,8 @@ added: v0.5.0 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset to - uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + to `uint32` anymore. --> * `value` {integer} Number to be written to `buf`. @@ -2203,8 +2204,8 @@ added: v0.5.5 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset to - uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + to `uint32` anymore. --> * `value` {integer} Number to be written to `buf`. 
@@ -2236,8 +2237,8 @@ added: v0.5.5 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset to - uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + to `uint32` anymore. --> * `value` {integer} Number to be written to `buf`. @@ -2269,8 +2270,8 @@ added: v0.11.15 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset and - byteLength to uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + and `byteLength` to `uint32` anymore. --> * `value` {integer} Number to be written to `buf`. @@ -2304,8 +2305,8 @@ added: v0.5.0 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset to - uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + to `uint32` anymore. --> * `value` {integer} Number to be written to `buf`. @@ -2336,8 +2337,8 @@ added: v0.5.5 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset to - uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + to `uint32` anymore. --> * `value` {integer} Number to be written to `buf`. @@ -2373,8 +2374,8 @@ added: v0.5.5 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset to - uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + to `uint32` anymore. --> * `value` {integer} Number to be written to `buf`. @@ -2408,8 +2409,8 @@ added: v0.5.5 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/18395 - description: Removed noAssert and no implicit coercion of the offset and - byteLength to uint32 anymore. + description: Removed `noAssert` and no implicit coercion of the offset + and `byteLength` to `uint32` anymore. --> * `value` {integer} Number to be written to `buf`. diff --git a/doc/api/child_process.md b/doc/api/child_process.md index 47d68928e46ab2..1fc2855e4f65f5 100644 --- a/doc/api/child_process.md +++ b/doc/api/child_process.md @@ -211,7 +211,7 @@ Unlike the exec(3) POSIX system call, `child_process.exec()` does not replace the existing process and uses a shell to execute the command. If this method is invoked as its [`util.promisify()`][]ed version, it returns -a Promise for an object with `stdout` and `stderr` properties. In case of an +a `Promise` for an `Object` with `stdout` and `stderr` properties. In case of an error (including any error resulting in an exit code other than 0), a rejected promise is returned, with the same `error` object given in the callback, but with an additional two properties `stdout` and `stderr`. @@ -290,7 +290,7 @@ stderr output. If `encoding` is `'buffer'`, or an unrecognized character encoding, `Buffer` objects will be passed to the callback instead. If this method is invoked as its [`util.promisify()`][]ed version, it returns -a Promise for an object with `stdout` and `stderr` properties. In case of an +a `Promise` for an `Object` with `stdout` and `stderr` properties. 
In case of an error (including any error resulting in an exit code other than 0), a rejected promise is returned, with the same `error` object given in the callback, but with an additional two properties `stdout` and `stderr`. diff --git a/doc/api/cli.md b/doc/api/cli.md index 5bc673f5975d6f..6d055d9d03a6dd 100644 --- a/doc/api/cli.md +++ b/doc/api/cli.md @@ -101,25 +101,25 @@ Specify ICU data load path. (Overrides `NODE_ICU_DATA`.) added: v7.6.0 --> -Activate inspector on host:port and break at start of user script. -Default host:port is 127.0.0.1:9229. +Activate inspector on `host:port` and break at start of user script. +Default `host:port` is `127.0.0.1:9229`. ### `--inspect-port=[host:]port` -Set the host:port to be used when the inspector is activated. +Set the `host:port` to be used when the inspector is activated. Useful when activating the inspector by sending the `SIGUSR1` signal. -Default host is 127.0.0.1. +Default host is `127.0.0.1`. ### `--inspect[=[host:]port]` -Activate inspector on host:port. Default is 127.0.0.1:9229. +Activate inspector on `host:port`. Default is `127.0.0.1:9229`. V8 inspector integration allows tools such as Chrome DevTools and IDEs to debug and profile Node.js instances. The tools attach to Node.js instances via a @@ -461,7 +461,7 @@ options property is explicitly specified for a TLS or HTTPS client or server. added: v0.11.15 --> -Data path for ICU (Intl object) data. Will extend linked-in data when compiled +Data path for ICU (`Intl` object) data. Will extend linked-in data when compiled with small-icu support. ### `NODE_NO_WARNINGS=1` diff --git a/doc/api/cluster.md b/doc/api/cluster.md index 7c0baa36ff5491..b420645bb4dcc4 100644 --- a/doc/api/cluster.md +++ b/doc/api/cluster.md @@ -117,7 +117,7 @@ also be used for other use cases requiring worker processes. added: v0.7.0 --> -A Worker object contains all public information and method about a worker. +A `Worker` object contains all public information and method about a worker. In the master it can be obtained using `cluster.workers`. In a worker it can be obtained using `cluster.worker`. @@ -497,7 +497,7 @@ cluster.on('exit', (worker, code, signal) => { }); ``` -See [child_process event: `'exit'`][]. +See [`child_process` event: `'exit'`][]. ## Event: 'fork' diff --git a/doc/api/crypto.md b/doc/api/crypto.md index 0f9d3c80b5347a..b98693466a8efd 100644 --- a/doc/api/crypto.md +++ b/doc/api/crypto.md @@ -282,7 +282,7 @@ add padding to the input data to the appropriate block size. To disable the default padding call `cipher.setAutoPadding(false)`. When `autoPadding` is `false`, the length of the entire input data must be a -multiple of the cipher's block size or [`cipher.final()`][] will throw an Error. +multiple of the cipher's block size or [`cipher.final()`][] will throw an error. Disabling automatic padding is useful for non-standard padding, for instance using `0x0` instead of PKCS padding. @@ -810,7 +810,7 @@ to be a string; otherwise `privateKey` is expected to be a [`Buffer`][], If `privateKey` is not valid for the curve specified when the `ECDH` object was created, an error is thrown. Upon setting the private key, the associated -public point (key) is also generated and set in the ECDH object. +public point (key) is also generated and set in the `ECDH` object. ### ecdh.setPublicKey(publicKey[, encoding]) -If domains are in use, then all **new** EventEmitter objects (including +If domains are in use, then all **new** `EventEmitter` objects (including Stream objects, requests, responses, etc.) 
will be implicitly bound to the active domain at the time of their creation. Additionally, callbacks passed to lowlevel event loop requests (such as -to fs.open, or other callback-taking methods) will automatically be +to `fs.open()`, or other callback-taking methods) will automatically be bound to the active domain. If they throw, then the domain will catch the error. -In order to prevent excessive memory usage, Domain objects themselves +In order to prevent excessive memory usage, `Domain` objects themselves are not implicitly added as children of the active domain. If they were, then it would be too easy to prevent request and response objects from being properly garbage collected. -To nest Domain objects as children of a parent Domain they must be explicitly -added. +To nest `Domain` objects as children of a parent `Domain` they must be +explicitly added. Implicit binding routes thrown errors and `'error'` events to the -Domain's `'error'` event, but does not register the EventEmitter on the -Domain. +`Domain`'s `'error'` event, but does not register the `EventEmitter` on the +`Domain`. Implicit binding only takes care of thrown errors and `'error'` events. ## Explicit Binding @@ -271,14 +271,12 @@ serverDomain.run(() => { * Returns: {Domain} -Returns a new Domain object. - ## Class: Domain -The Domain class encapsulates the functionality of routing errors and -uncaught exceptions to the active Domain object. +The `Domain` class encapsulates the functionality of routing errors and +uncaught exceptions to the active `Domain` object. -Domain is a child class of [`EventEmitter`][]. To handle the errors that it +`Domain` is a child class of [`EventEmitter`][]. To handle the errors that it catches, listen to its `'error'` event. ### domain.members @@ -301,7 +299,7 @@ This also works with timers that are returned from [`setInterval()`][] and [`setTimeout()`][]. If their callback function throws, it will be caught by the domain `'error'` handler. -If the Timer or EventEmitter was already bound to a domain, it is removed +If the Timer or `EventEmitter` was already bound to a domain, it is removed from that one, and bound to this one instead. ### domain.bind(callback) @@ -444,7 +442,7 @@ than crashing the program. ## Domains and Promises As of Node.js 8.0.0, the handlers of Promises are run inside the domain in -which the call to `.then` or `.catch` itself was made: +which the call to `.then()` or `.catch()` itself was made: ```js const d1 = domain.create(); @@ -481,7 +479,7 @@ d2.run(() => { ``` Note that domains will not interfere with the error handling mechanisms for -Promises, i.e. no `'error'` event will be emitted for unhandled Promise +Promises, i.e. no `'error'` event will be emitted for unhandled `Promise` rejections. [`Error`]: errors.html#errors_class_error diff --git a/doc/api/errors.md b/doc/api/errors.md index b434dc3c610f8a..a2489c6ac2e7e7 100644 --- a/doc/api/errors.md +++ b/doc/api/errors.md @@ -18,7 +18,7 @@ errors: as attempting to open a file that does not exist, attempting to send data over a closed socket, etc; - And User-specified errors triggered by application code. -- Assertion Errors are a special class of error that can be triggered whenever +- `AssertionError`s are a special class of error that can be triggered whenever Node.js detects an exceptional logic violation that should never occur. These are raised typically by the `assert` module. @@ -32,7 +32,7 @@ to provide *at least* the properties available on that class. 
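A brief sketch of what that guarantee looks like in practice; the missing path below is illustrative.

```js
const fs = require('fs');

try {
  fs.readFileSync('/some/missing/file');
} catch (err) {
  // Standard Error properties are always present...
  console.error(err.name);         // 'Error'
  console.error(typeof err.stack); // 'string'
  // ...and system errors add fields such as `code` and `errno`.
  console.error(err.code);         // 'ENOENT'
}
```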
Node.js supports several mechanisms for propagating and handling errors that occur while an application is running. How these errors are reported and -handled depends entirely on the type of Error and the style of the API that is +handled depends entirely on the type of `Error` and the style of the API that is called. All JavaScript errors are handled as exceptions that *immediately* generate @@ -137,7 +137,7 @@ pattern referred to as an _error-first callback_ (sometimes referred to as a _Node.js style callback_). With this pattern, a callback function is passed to the method as an argument. When the operation either completes or an error is raised, the callback function is called with -the Error object (if any) passed as the first argument. If no error was +the `Error` object (if any) passed as the first argument. If no error was raised, the first argument will be passed as `null`. ```js @@ -422,7 +422,7 @@ they may only be caught by other contexts. A subclass of `Error` that indicates that a provided argument is not an allowable type. For example, passing a function to a parameter which expects a -string would be considered a TypeError. +string would be considered a `TypeError`. ```js require('url').parse(() => { }); @@ -440,7 +440,7 @@ A JavaScript exception is a value that is thrown as a result of an invalid operation or as the target of a `throw` statement. While it is not required that these values are instances of `Error` or classes which inherit from `Error`, all exceptions thrown by Node.js or the JavaScript runtime *will* be -instances of Error. +instances of `Error`. Some exceptions are *unrecoverable* at the JavaScript layer. Such exceptions will *always* cause the Node.js process to crash. Examples include `assert()` @@ -492,7 +492,7 @@ typically `E` followed by a sequence of capital letters. The `error.errno` property is a number or a string. The number is a **negative** value which corresponds to the error code defined -in [`libuv Error handling`]. See uv-errno.h header file +in [`libuv Error handling`]. See `uv-errno.h` header file (`deps/uv/include/uv-errno.h` in the Node.js source tree) for details. In case of a string, it is the same as `error.code`. @@ -1081,7 +1081,7 @@ An invalid or unsupported value was passed for a given argument. ### ERR_INVALID_ARRAY_LENGTH -An Array was not of the expected length or in a valid range. +An array was not of the expected length or in a valid range. ### ERR_INVALID_ASYNC_ID @@ -1516,7 +1516,7 @@ length. ### ERR_TLS_CERT_ALTNAME_INVALID While using TLS, the hostname/IP of the peer did not match any of the -subjectAltNames in its certificate. +`subjectAltNames` in its certificate. ### ERR_TLS_DH_PARAM_SIZE @@ -1570,12 +1570,12 @@ the `--without-v8-platform` flag. ### ERR_TRANSFORM_ALREADY_TRANSFORMING -A Transform stream finished while it was still transforming. +A `Transform` stream finished while it was still transforming. ### ERR_TRANSFORM_WITH_LENGTH_0 -A Transform stream finished with data still in the write buffer. +A `Transform` stream finished with data still in the write buffer. ### ERR_TTY_INIT_FAILED @@ -1645,7 +1645,7 @@ itself, although it is possible for user code to trigger it. ### ERR_V8BREAKITERATOR -The V8 BreakIterator API was used but the full ICU data set is not installed. +The V8 `BreakIterator` API was used but the full ICU data set is not installed. 
### ERR_VALID_PERFORMANCE_ENTRY_TYPE diff --git a/doc/api/esm.md b/doc/api/esm.md index 4090e545fdb54b..a1e3cb149ab7d8 100644 --- a/doc/api/esm.md +++ b/doc/api/esm.md @@ -134,8 +134,8 @@ export async function resolve(specifier, } ``` -The parentURL is provided as `undefined` when performing main Node.js load -itself. +The `parentModuleURL` is provided as `undefined` when performing main Node.js +load itself. The default Node.js ES module resolution function is provided as a third argument to the resolver for easy compatibility workflows. diff --git a/doc/api/events.md b/doc/api/events.md index e5ae1e3fe7bf11..0dd9e4be5dbc6d 100644 --- a/doc/api/events.md +++ b/doc/api/events.md @@ -8,8 +8,7 @@ Much of the Node.js core API is built around an idiomatic asynchronous event-driven architecture in which certain kinds of objects (called "emitters") -periodically emit named events that cause Function objects ("listeners") to be -called. +emit named events that cause `Function` objects ("listeners") to be called. For instance: a [`net.Server`][] object emits an event each time a peer connects to it; a [`fs.ReadStream`][] emits an event when the file is opened; @@ -44,21 +43,21 @@ myEmitter.emit('event'); ## Passing arguments and `this` to listeners The `eventEmitter.emit()` method allows an arbitrary set of arguments to be -passed to the listener functions. It is important to keep in mind that when an -ordinary listener function is called by the `EventEmitter`, the standard `this` -keyword is intentionally set to reference the `EventEmitter` to which the +passed to the listener functions. It is important to keep in mind that when +an ordinary listener function is called, the standard `this` keyword +is intentionally set to reference the `EventEmitter` instance to which the listener is attached. ```js const myEmitter = new MyEmitter(); myEmitter.on('event', function(a, b) { - console.log(a, b, this); + console.log(a, b, this, this === myEmitter); // Prints: // a b MyEmitter { // domain: null, // _events: { event: [Function] }, // _eventsCount: 1, - // _maxListeners: undefined } + // _maxListeners: undefined } true }); myEmitter.emit('event', 'a', 'b'); ``` @@ -167,7 +166,7 @@ The `EventEmitter` class is defined and exposed by the `events` module: const EventEmitter = require('events'); ``` -All EventEmitters emit the event `'newListener'` when new listeners are +All `EventEmitter`s emit the event `'newListener'` when new listeners are added and `'removeListener'` when existing listeners are removed. ### Event: 'newListener' @@ -314,7 +313,7 @@ added: v6.0.0 - Returns: {Array} Returns an array listing the events for which the emitter has registered -listeners. The values in the array will be strings or Symbols. +listeners. The values in the array will be strings or `Symbol`s. ```js const EventEmitter = require('events'); @@ -589,7 +588,7 @@ added: v0.3.5 - `n` {integer} - Returns: {EventEmitter} -By default EventEmitters will print a warning if more than `10` listeners are +By default `EventEmitter`s will print a warning if more than `10` listeners are added for a particular event. This is a useful default that helps finding memory leaks. Obviously, not all events should be limited to just 10 listeners. 
The `emitter.setMaxListeners()` method allows the limit to be modified for this diff --git a/doc/api/fs.md b/doc/api/fs.md index 6e3628c8fbb2a0..30d6729d48c13d 100644 --- a/doc/api/fs.md +++ b/doc/api/fs.md @@ -177,7 +177,7 @@ Using WHATWG [`URL`][] objects might introduce platform-specific behaviors. On Windows, `file:` URLs with a hostname convert to UNC paths, while `file:` URLs with drive letters convert to local absolute paths. `file:` URLs without a -hostname nor a drive letter will result in a throw : +hostname nor a drive letter will result in a throw: ```js // On Windows : @@ -760,12 +760,35 @@ no effect on Windows (will behave like `fs.constants.F_OK`). The final argument, `callback`, is a callback function that is invoked with a possible error argument. If any of the accessibility checks fail, the error -argument will be an `Error` object. The following example checks if the file -`/etc/passwd` can be read and written by the current process. +argument will be an `Error` object. The following examples check if +`package.json` exists, and if it is readable or writable. ```js -fs.access('/etc/passwd', fs.constants.R_OK | fs.constants.W_OK, (err) => { - console.log(err ? 'no access!' : 'can read/write'); +const file = 'package.json'; + +// Check if the file exists in the current directory. +fs.access(file, fs.constants.F_OK, (err) => { + console.log(`${file} ${err ? 'does not exist' : 'exists'}`); +}); + +// Check if the file is readable. +fs.access(file, fs.constants.R_OK, (err) => { + console.log(`${file} ${err ? 'is not readable' : 'is readable'}`); +}); + +// Check if the file is writable. +fs.access(file, fs.constants.W_OK, (err) => { + console.log(`${file} ${err ? 'is not writable' : 'is writable'}`); +}); + +// Check if the file exists in the current directory, and if it is writable. +fs.access(file, fs.constants.F_OK | fs.constants.W_OK, (err) => { + if (err) { + console.error( + `${file} ${err.code === 'ENOENT' ? 'does not exist' : 'is read-only'}`); + } else { + console.log(`${file} exists, and it is writable`); + } }); ``` @@ -892,7 +915,7 @@ try { } ``` -## fs.appendFile(file, data[, options], callback) +## fs.appendFile(path, data[, options], callback) -* `file` {string|Buffer|URL|number} filename or file descriptor +* `path` {string|Buffer|URL|number} filename or file descriptor * `data` {string|Buffer} * `options` {Object|string} * `encoding` {string|null} **Default:** `'utf8'` @@ -939,7 +962,7 @@ If `options` is a string, then it specifies the encoding. Example: fs.appendFile('message.txt', 'data to append', 'utf8', callback); ``` -The `file` may be specified as a numeric file descriptor that has been opened +The `path` may be specified as a numeric file descriptor that has been opened for appending (using `fs.open()` or `fs.openSync()`). The file descriptor will not be closed automatically. @@ -955,7 +978,7 @@ fs.open('message.txt', 'a', (err, fd) => { }); ``` -## fs.appendFileSync(file, data[, options]) +## fs.appendFileSync(path, data[, options]) -* `file` {string|Buffer|URL|number} filename or file descriptor +* `path` {string|Buffer|URL|number} filename or file descriptor * `data` {string|Buffer} * `options` {Object|string} * `encoding` {string|null} **Default:** `'utf8'` @@ -994,7 +1017,7 @@ If `options` is a string, then it specifies the encoding. 
Example: fs.appendFileSync('message.txt', 'data to append', 'utf8'); ``` -The `file` may be specified as a numeric file descriptor that has been opened +The `path` may be specified as a numeric file descriptor that has been opened for appending (using `fs.open()` or `fs.openSync()`). The file descriptor will not be closed automatically. @@ -1730,7 +1753,7 @@ fs.ftruncate(fd, 4, (err) => { ``` If the file previously was shorter than `len` bytes, it is extended, and the -extended part is filled with null bytes ('\0'). For example, +extended part is filled with null bytes (`'\0'`). For example, ```js console.log(fs.readFileSync('temp.txt', 'utf8')); @@ -1748,7 +1771,7 @@ fs.ftruncate(fd, 10, (err) => { // ('Node.js\0\0\0' in UTF8) ``` -The last three bytes are null bytes ('\0'), to compensate the over-truncation. +The last three bytes are null bytes (`'\0'`), to compensate the over-truncation. ## fs.ftruncateSync(fd[, len]) * `atime` {number|string|Date} -* `mtime` {number|string|Date}` +* `mtime` {number|string|Date} * Returns: {Promise} Change the file system timestamps of the object referenced by the `FileHandle` @@ -3687,12 +3708,12 @@ condition, since other processes may change the file's state between the two calls. Instead, user code should open/read/write the file directly and handle the error raised if the file is not accessible. -### fsPromises.appendFile(file, data[, options]) +### fsPromises.appendFile(path, data[, options]) -* `file` {string|Buffer|URL|FileHandle} filename or `FileHandle` +* `path` {string|Buffer|URL|FileHandle} filename or `FileHandle` * `data` {string|Buffer} * `options` {Object|string} * `encoding` {string|null} **Default:** `'utf8'` @@ -3706,7 +3727,7 @@ resolved with no arguments upon success. If `options` is a string, then it specifies the encoding. -The `file` may be specified as a `FileHandle` that has been opened +The `path` may be specified as a `FileHandle` that has been opened for appending (using `fsPromises.open()`). ### fsPromises.chmod(path, mode) @@ -3878,7 +3899,7 @@ doTruncate().catch(console.error); ``` If the file previously was shorter than `len` bytes, it is extended, and the -extended part is filled with null bytes ('\0'). For example, +extended part is filled with null bytes (`'\0'`). For example, ```js console.log(fs.readFileSync('temp.txt', 'utf8')); @@ -3893,7 +3914,7 @@ async function doTruncate() { doTruncate().catch(console.error); ``` -The last three bytes are null bytes ('\0'), to compensate the over-truncation. +The last three bytes are null bytes (`'\0'`), to compensate the over-truncation. ### fsPromises.futimes(filehandle, atime, mtime) @@ -924,10 +930,7 @@ This method is identical to [`server.listen()`][] from [`net.Server`][]. added: v5.7.0 --> -* {boolean} - -A Boolean indicating whether or not the server is listening for -connections. +* {boolean} Indicates whether or not the server is listening for connections. ### server.maxHeadersCount - `options` {Object} - * `IncomingMessage` {http.IncomingMessage} Specifies the IncomingMessage class - to be used. Useful for extending the original `IncomingMessage`. + * `IncomingMessage` {http.IncomingMessage} Specifies the `IncomingMessage` + class to be used. Useful for extending the original `IncomingMessage`. **Default:** `IncomingMessage`. - * `ServerResponse` {http.ServerResponse} Specifies the ServerResponse class to - be used. Useful for extending the original `ServerResponse`. 
**Default:** + * `ServerResponse` {http.ServerResponse} Specifies the `ServerResponse` class + to be used. Useful for extending the original `ServerResponse`. **Default:** `ServerResponse`. - `requestListener` {Function} @@ -1868,8 +1871,8 @@ changes: v6 will be used. * `port` {number} Port of remote server. **Default:** `80`. * `localAddress` {string} Local interface to bind for network connections. - * `socketPath` {string} Unix Domain Socket (use one of host:port or - socketPath). + * `socketPath` {string} Unix Domain Socket (use one of `host:port` or + `socketPath`). * `method` {string} A string specifying the HTTP request method. **Default:** `'GET'`. * `path` {string} Request path. Should include query string if any. @@ -2072,7 +2075,7 @@ not abort the request or do anything besides add a `'timeout'` event. [`socket.setKeepAlive()`]: net.html#net_socket_setkeepalive_enable_initialdelay [`socket.setNoDelay()`]: net.html#net_socket_setnodelay_nodelay [`socket.setTimeout()`]: net.html#net_socket_settimeout_timeout_callback +[`socket.unref()`]: net.html#net_socket_unref [`url.parse()`]: url.html#url_url_parse_urlstring_parsequerystring_slashesdenotehost [Readable Stream]: stream.html#stream_class_stream_readable [Writable Stream]: stream.html#stream_class_stream_writable -[socket.unref()]: net.html#net_socket_unref diff --git a/doc/api/http2.md b/doc/api/http2.md index b3aac4741eeeca..a8f762173404f1 100644 --- a/doc/api/http2.md +++ b/doc/api/http2.md @@ -108,7 +108,7 @@ have occasion to work with the `Http2Session` object directly, with most actions typically taken through interactions with either the `Http2Server` or `Http2Stream` objects. -#### Http2Session and Sockets +#### `Http2Session` and Sockets Every `Http2Session` instance is associated with exactly one [`net.Socket`][] or [`tls.TLSSocket`][] when it is created. When either the `Socket` or the @@ -232,10 +232,13 @@ session.on('remoteSettings', (settings) => { added: v8.4.0 --> -The `'stream'` event is emitted when a new `Http2Stream` is created. When -invoked, the handler function will receive a reference to the `Http2Stream` -object, a [HTTP/2 Headers Object][], and numeric flags associated with the -creation of the stream. +* `stream` {Http2Stream} A reference to the stream +* `headers` {HTTP/2 Headers Object} An object describing the headers +* `flags` {number} The associated numeric flags +* `rawHeaders` {Array} An array containing the raw header names followed by + their respective values. + +The `'stream'` event is emitted when a new `Http2Stream` is created. ```js const http2 = require('http2'); @@ -410,7 +413,7 @@ added: v9.4.0 * {string[]|undefined} If the `Http2Session` is connected to a `TLSSocket`, the `originSet` property -will return an Array of origins for which the `Http2Session` may be +will return an `Array` of origins for which the `Http2Session` may be considered authoritative. #### http2session.pendingSettingsAck @@ -500,12 +503,12 @@ added: v8.4.0 * {net.Socket|tls.TLSSocket} -Returns a Proxy object that acts as a `net.Socket` (or `tls.TLSSocket`) but +Returns a `Proxy` object that acts as a `net.Socket` (or `tls.TLSSocket`) but limits available methods to ones safe to use with HTTP/2. `destroy`, `emit`, `end`, `pause`, `read`, `resume`, and `write` will throw an error with code `ERR_HTTP2_NO_SOCKET_MANIPULATION`. See -[Http2Session and Sockets][] for more information. +[`Http2Session` and Sockets][] for more information. `setTimeout` method will be called on this `Http2Session`. 
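
The restrictions on the `Proxy` returned by `http2session.socket` described above can be sketched as follows, assuming an HTTP/2 server is listening at the hypothetical local address used here:

```js
const http2 = require('http2');

// Assumes a plaintext HTTP/2 server is listening locally (hypothetical port).
const session = http2.connect('http://localhost:8000');

session.on('connect', () => {
  // Property reads are forwarded to the underlying net.Socket.
  console.log(session.socket.remoteAddress);

  try {
    // Methods that would bypass the HTTP/2 framing layer throw instead.
    session.socket.destroy();
  } catch (err) {
    console.error(err.code); // 'ERR_HTTP2_NO_SOCKET_MANIPULATION'
  }

  session.close();
});
```
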
@@ -592,8 +595,9 @@ added: v9.4.0 * `alt` {string} A description of the alternative service configuration as defined by [RFC 7838][]. * `originOrStream` {number|string|URL|Object} Either a URL string specifying - the origin (or an Object with an `origin` property) or the numeric identifier - of an active `Http2Stream` as given by the `http2stream.id` property. + the origin (or an `Object` with an `origin` property) or the numeric + identifier of an active `Http2Stream` as given by the `http2stream.id` + property. Submits an `ALTSVC` frame (as defined by [RFC 7838][]) to the connected client. @@ -1041,7 +1045,7 @@ Provides miscellaneous information about the current state of the * `localWindowSize` {number} The number of bytes the connected peer may send for this `Http2Stream` without receiving a `WINDOW_UPDATE`. * `state` {number} A flag indicating the low-level current state of the - `Http2Stream` as determined by nghttp2. + `Http2Stream` as determined by `nghttp2`. * `localClose` {number} `true` if this `Http2Stream` has been closed locally. * `remoteClose` {number} `true` if this `Http2Stream` has been closed remotely. @@ -1140,7 +1144,7 @@ added: v8.4.0 The `'response'` event is emitted when a response `HEADERS` frame has been received for this stream from the connected HTTP/2 server. The listener is -invoked with two arguments: an Object containing the received +invoked with two arguments: an `Object` containing the received [HTTP/2 Headers Object][], and flags associated with the headers. ```js @@ -1180,7 +1184,7 @@ added: v8.4.0 * {boolean} -Boolean (read-only). True if headers were sent, false otherwise. +True if headers were sent, false otherwise (read-only). #### http2stream.pushAllowed -* {Object} Module object +* {module} The module that first required this one. diff --git a/doc/api/n-api.md b/doc/api/n-api.md index fcbd4decb3538a..17958c8daf255e 100644 --- a/doc/api/n-api.md +++ b/doc/api/n-api.md @@ -40,10 +40,10 @@ there to be one or more C++ wrapper modules that provide an inlineable C++ API. Binaries built with these wrapper modules will depend on the symbols for the N-API C based functions exported by Node.js. These wrappers are not part of N-API, nor will they be maintained as part of Node.js. One such -example is: [node-api](https://github.com/nodejs/node-api). +example is: [node-addon-api](https://github.com/nodejs/node-addon-api). In order to use the N-API functions, include the file -[node_api.h](https://github.com/nodejs/node/blob/master/src/node_api.h) +[`node_api.h`](https://github.com/nodejs/node/blob/master/src/node_api.h) which is located in the src directory in the node development tree: ```C @@ -73,7 +73,9 @@ typedef enum { napi_generic_failure, napi_pending_exception, napi_cancelled, - napi_status_last + napi_escape_called_twice, + napi_handle_scope_mismatch, + napi_callback_scope_mismatch } napi_status; ``` If additional information is required upon an API returning a failed status, @@ -434,7 +436,7 @@ NODE_EXTERN napi_status napi_create_error(napi_env env, ``` - `[in] env`: The environment that the API is invoked under. - `[in] code`: Optional `napi_value` with the string for the error code to - be associated with the error. +be associated with the error. - `[in] msg`: `napi_value` that references a JavaScript `String` to be used as the message for the `Error`. - `[out] result`: `napi_value` representing the error created. 
@@ -455,7 +457,7 @@ NODE_EXTERN napi_status napi_create_type_error(napi_env env, ``` - `[in] env`: The environment that the API is invoked under. - `[in] code`: Optional `napi_value` with the string for the error code to - be associated with the error. +be associated with the error. - `[in] msg`: `napi_value` that references a JavaScript `String` to be used as the message for the `Error`. - `[out] result`: `napi_value` representing the error created. @@ -476,7 +478,7 @@ NODE_EXTERN napi_status napi_create_range_error(napi_env env, ``` - `[in] env`: The environment that the API is invoked under. - `[in] code`: Optional `napi_value` with the string for the error code to - be associated with the error. +be associated with the error. - `[in] msg`: `napi_value` that references a JavaScript `String` to be used as the message for the `Error`. - `[out] result`: `napi_value` representing the error created. @@ -589,7 +591,7 @@ that has a loop which iterates through the elements in a large array: ```C for (int i = 0; i < 1000000; i++) { napi_value result; - napi_status status = napi_get_element(e, object, i, &result); + napi_status status = napi_get_element(env, object, i, &result); if (status != napi_ok) { break; } @@ -626,7 +628,7 @@ for (int i = 0; i < 1000000; i++) { break; } napi_value result; - status = napi_get_element(e, object, i, &result); + status = napi_get_element(env, object, i, &result); if (status != napi_ok) { break; } @@ -913,8 +915,7 @@ provided by the addon: napi_value Init(napi_env env, napi_value exports) { napi_status status; napi_property_descriptor desc = - {"hello", Method, 0, 0, 0, napi_default, 0}; - if (status != napi_ok) return NULL; + {"hello", NULL, Method, NULL, NULL, NULL, napi_default, NULL}; status = napi_define_properties(env, exports, 1, &desc); if (status != napi_ok) return NULL; return exports; @@ -941,7 +942,7 @@ To define a class so that new instances can be created (often used with napi_value Init(napi_env env, napi_value exports) { napi_status status; napi_property_descriptor properties[] = { - { "value", NULL, GetValue, SetValue, 0, napi_default, 0 }, + { "value", NULL, NULL, GetValue, SetValue, NULL, napi_default, NULL }, DECLARE_NAPI_METHOD("plusOne", PlusOne), DECLARE_NAPI_METHOD("multiply", Multiply), }; @@ -1275,7 +1276,7 @@ This API allocates a `node::Buffer` object and initializes it with data backed by the passed in buffer. While this is still a fully-supported data structure, in most cases using a `TypedArray` will suffice. -For Node.js >=4 `Buffers` are Uint8Arrays. +For Node.js >=4 `Buffers` are `Uint8Array`s. #### napi_create_function -A Boolean indicating whether or not the server is listening for -connections. +* {boolean} Indicates whether or not the server is listening for connections. ### server.maxConnections @@ -234,7 +234,7 @@ process.on('unhandledRejection', (reason, p) => { somePromise.then((res) => { return reportToUser(JSON.pasre(res)); // note the typo (`pasre`) -}); // no `.catch` or `.then` +}); // no `.catch()` or `.then()` ``` The following will also trigger the `'unhandledRejection'` event to be @@ -524,7 +524,7 @@ added: v0.7.7 * {Object} -The `process.config` property returns an Object containing the JavaScript +The `process.config` property returns an `Object` containing the JavaScript representation of the configure options used to compile the current Node.js executable. This is the same as the `config.gypi` file that was produced when running the `./configure` script. 
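
As a small illustration of the `process.config` object described above, a sketch that assumes only the standard `target_defaults` and `variables` sections of `config.gypi`:

```js
// Inspect the configure-time options baked into this Node.js binary.
// Which keys are present depends on how the binary was built.
const { target_defaults, variables } = process.config;

console.log(target_defaults.default_configuration); // e.g. 'Release'
console.log(`${Object.keys(variables).length} configure variables`);
```
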
@@ -699,10 +699,10 @@ added: v8.0.0 * `warning` {string|Error} The warning to emit. * `options` {Object} - * `type` {string} When `warning` is a String, `type` is the name to use + * `type` {string} When `warning` is a `String`, `type` is the name to use for the *type* of warning being emitted. **Default:** `'Warning'`. * `code` {string} A unique identifier for the warning instance being emitted. - * `ctor` {Function} When `warning` is a String, `ctor` is an optional + * `ctor` {Function} When `warning` is a `String`, `ctor` is an optional function used to limit the generated stack trace. **Default:** `process.emitWarning`. * `detail` {string} Additional text to include with the error. @@ -744,10 +744,10 @@ added: v6.0.0 --> * `warning` {string|Error} The warning to emit. -* `type` {string} When `warning` is a String, `type` is the name to use +* `type` {string} When `warning` is a `String`, `type` is the name to use for the *type* of warning being emitted. **Default:** `'Warning'`. * `code` {string} A unique identifier for the warning instance being emitted. -* `ctor` {Function} When `warning` is a String, `ctor` is an optional +* `ctor` {Function} When `warning` is a `String`, `ctor` is an optional function used to limit the generated stack trace. **Default:** `process.emitWarning`. @@ -1151,12 +1151,12 @@ added: v0.7.6 * Returns: {integer[]} The `process.hrtime()` method returns the current high-resolution real time -in a `[seconds, nanoseconds]` tuple Array, where `nanoseconds` is the +in a `[seconds, nanoseconds]` tuple `Array`, where `nanoseconds` is the remaining part of the real time that can't be represented in second precision. `time` is an optional parameter that must be the result of a previous `process.hrtime()` call to diff with the current time. If the parameter -passed in is not a tuple Array, a `TypeError` will be thrown. Passing in a +passed in is not a tuple `Array`, a `TypeError` will be thrown. Passing in a user-defined array instead of the result of a previous call to `process.hrtime()` will lead to undefined behavior. @@ -1401,7 +1401,7 @@ function definitelyAsync(arg, cb) { The next tick queue is completely drained on each pass of the event loop **before** additional I/O is processed. As a result, recursively setting -nextTick callbacks will block any I/O from happening, just like a +`nextTick()` callbacks will block any I/O from happening, just like a `while(true);` loop. ## process.noDeprecation @@ -1482,8 +1482,8 @@ changes: * {Object} -The `process.release` property returns an Object containing metadata related to -the current release, including URLs for the source tarball and headers-only +The `process.release` property returns an `Object` containing metadata related +to the current release, including URLs for the source tarball and headers-only tarball. `process.release` contains the following properties: @@ -1741,7 +1741,7 @@ The `process.stdout` property returns a stream connected to stream) unless fd `1` refers to a file, in which case it is a [Writable][] stream. -For example, to copy process.stdin to process.stdout: +For example, to copy `process.stdin` to `process.stdout`: ```js process.stdin.pipe(process.stdout); @@ -1961,7 +1961,7 @@ cases: * `7` **Internal Exception Handler Run-Time Failure** - There was an uncaught exception, and the internal fatal exception handler function itself threw an error while attempting to handle it. 
This - can happen, for example, if a [`'uncaughtException'`][] or + can happen, for example, if an [`'uncaughtException'`][] or `domain.on('error')` handler throws an error. * `8` - Unused. In previous versions of Node.js, exit code 8 sometimes indicated an uncaught exception. diff --git a/doc/api/readline.md b/doc/api/readline.md index ead469e97c9f27..a2a4adf3093a9a 100644 --- a/doc/api/readline.md +++ b/doc/api/readline.md @@ -303,7 +303,7 @@ rl.write('Delete this!'); rl.write(null, { ctrl: true, name: 'u' }); ``` -The `rl.write()` method will write the data to the `readline` Interface's +The `rl.write()` method will write the data to the `readline` `Interface`'s `input` *as if it were provided by the user*. ## readline.clearLine(stream, dir) @@ -401,9 +401,9 @@ a `'resize'` event on the `output` if or when the columns ever change ### Use of the `completer` Function The `completer` function takes the current line entered by the user -as an argument, and returns an Array with 2 entries: +as an argument, and returns an `Array` with 2 entries: -* An Array with matching entries for the completion. +* An `Array` with matching entries for the completion. * The substring that was used for the matching. For instance: `[[substr1, substr2, ...], originalsubstring]`. @@ -447,7 +447,7 @@ added: v0.7.7 * `interface` {readline.Interface} The `readline.emitKeypressEvents()` method causes the given [Readable][] -`stream` to begin emitting `'keypress'` events corresponding to received input. +stream to begin emitting `'keypress'` events corresponding to received input. Optionally, `interface` specifies a `readline.Interface` instance for which autocompletion is disabled when copy-pasted input is detected. diff --git a/doc/api/repl.md b/doc/api/repl.md index 995dabaa5d863d..356d5ef47e206f 100644 --- a/doc/api/repl.md +++ b/doc/api/repl.md @@ -63,7 +63,7 @@ The following key combinations in the REPL have these special effects: When pressed twice on a blank line, has the same effect as the `.exit` command. * `-D` - Has the same effect as the `.exit` command. -* `` - When pressed on a blank line, displays global and local(scope) +* `` - When pressed on a blank line, displays global and local (scope) variables. When pressed while entering other input, displays relevant autocompletion options. @@ -141,6 +141,17 @@ global or scoped variable, the input `fs` will be evaluated on-demand as > fs.createReadStream('./some/file'); ``` +#### Global Uncaught Exceptions + +The REPL uses the [`domain`][] module to catch all uncaught exceptions for that +REPL session. + +This use of the [`domain`][] module in the REPL has these side effects: + +* Uncaught exceptions do not emit the [`'uncaughtException'`][] event. +* Trying to use [`process.setUncaughtExceptionCaptureCallback()`][] throws + an [`ERR_DOMAIN_CANNOT_SET_UNCAUGHT_EXCEPTION_CAPTURE`][] error. + #### Assignment of the `_` (underscore) variable -Both [Writable][] and [Readable][] streams will store data in an internal +Both [`Writable`][] and [`Readable`][] streams will store data in an internal buffer that can be retrieved using `writable.writableBuffer` or `readable.readableBuffer`, respectively. @@ -74,7 +74,7 @@ passed into the streams constructor. For normal streams, the `highWaterMark` option specifies a [total number of bytes][hwm-gotcha]. For streams operating in object mode, the `highWaterMark` specifies a total number of objects. 
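
A small sketch of the buffering behavior described above, assuming only the documented `highWaterMark` option and the `readableHighWaterMark`/`readableLength` properties:

```js
const { Readable } = require('stream');

// A readable stream with a 1 KB buffer threshold instead of the 16 KB default.
// In object mode, highWaterMark would count objects rather than bytes.
const tiny = new Readable({
  highWaterMark: 1024,
  read() {} // no-op; data is pushed manually below
});

tiny.push(Buffer.alloc(512));
console.log(tiny.readableHighWaterMark); // 1024
console.log(tiny.readableLength); // 512 bytes currently buffered
```
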
-Data is buffered in Readable streams when the implementation calls +Data is buffered in `Readable` streams when the implementation calls [`stream.push(chunk)`][stream-push]. If the consumer of the Stream does not call [`stream.read()`][stream-read], the data will sit in the internal queue until it is consumed. @@ -85,7 +85,7 @@ underlying resource until the data currently buffered can be consumed (that is, the stream will stop calling the internal `readable._read()` method that is used to fill the read buffer). -Data is buffered in Writable streams when the +Data is buffered in `Writable` streams when the [`writable.write(chunk)`][stream-write] method is called repeatedly. While the total size of the internal write buffer is below the threshold set by `highWaterMark`, calls to `writable.write()` will return `true`. Once @@ -96,15 +96,15 @@ A key goal of the `stream` API, particularly the [`stream.pipe()`] method, is to limit the buffering of data to acceptable levels such that sources and destinations of differing speeds will not overwhelm the available memory. -Because [Duplex][] and [Transform][] streams are both Readable and Writable, -each maintain *two* separate internal buffers used for reading and writing, -allowing each side to operate independently of the other while maintaining an -appropriate and efficient flow of data. For example, [`net.Socket`][] instances -are [Duplex][] streams whose Readable side allows consumption of data received -*from* the socket and whose Writable side allows writing data *to* the socket. -Because data may be written to the socket at a faster or slower rate than data -is received, it is important for each side to operate (and buffer) independently -of the other. +Because [`Duplex`][] and [`Transform`][] streams are both `Readable` and +`Writable`, each maintain *two* separate internal buffers used for reading and +writing, allowing each side to operate independently of the other while +maintaining an appropriate and efficient flow of data. For example, +[`net.Socket`][] instances are [`Duplex`][] streams whose `Readable` side allows +consumption of data received *from* the socket and whose `Writable` side allows +writing data *to* the socket. Because data may be written to the socket at a +faster or slower rate than data is received, it is important for each side to +operate (and buffer) independently of the other. ## API for Stream Consumers @@ -156,17 +156,18 @@ server.listen(1337); // error: Unexpected token o in JSON at position 1 ``` -[Writable][] streams (such as `res` in the example) expose methods such as +[`Writable`][] streams (such as `res` in the example) expose methods such as `write()` and `end()` that are used to write data onto the stream. -[Readable][] streams use the [`EventEmitter`][] API for notifying application +[`Readable`][] streams use the [`EventEmitter`][] API for notifying application code when data is available to be read off the stream. That available data can be read from the stream in multiple ways. -Both [Writable][] and [Readable][] streams use the [`EventEmitter`][] API in +Both [`Writable`][] and [`Readable`][] streams use the [`EventEmitter`][] API in various ways to communicate the current state of the stream. -[Duplex][] and [Transform][] streams are both [Writable][] and [Readable][]. +[`Duplex`][] and [`Transform`][] streams are both [`Writable`][] and +[`Readable`][]. 
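
For example, a `net.Socket` can be written to and read from independently; this sketch assumes an echo server is listening on a hypothetical local port:

```js
const net = require('net');

// The socket is a Duplex stream: the Writable side sends data to the peer
// while the Readable side receives data from it, each buffering independently.
const socket = net.connect(4000, 'localhost', () => {
  socket.write('ping\n'); // Writable side
});

socket.on('data', (chunk) => { // Readable side
  console.log(`received: ${chunk}`);
  socket.end();
});
```
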
Applications that are either writing data to or consuming data from a stream are not required to implement the stream interfaces directly and will generally @@ -180,7 +181,7 @@ section [API for Stream Implementers][]. Writable streams are an abstraction for a *destination* to which data is written. -Examples of [Writable][] streams include: +Examples of [`Writable`][] streams include: * [HTTP requests, on the client][] * [HTTP responses, on the server][] @@ -191,14 +192,14 @@ Examples of [Writable][] streams include: * [child process stdin][] * [`process.stdout`][], [`process.stderr`][] -Some of these examples are actually [Duplex][] streams that implement the -[Writable][] interface. +Some of these examples are actually [`Duplex`][] streams that implement the +[`Writable`][] interface. -All [Writable][] streams implement the interface defined by the +All [`Writable`][] streams implement the interface defined by the `stream.Writable` class. -While specific instances of [Writable][] streams may differ in various ways, -all Writable streams follow the same fundamental usage pattern as illustrated +While specific instances of [`Writable`][] streams may differ in various ways, +all `Writable` streams follow the same fundamental usage pattern as illustrated in the example below: ```js @@ -224,7 +225,7 @@ The `'close'` event is emitted when the stream and any of its underlying resources (a file descriptor, for example) have been closed. The event indicates that no more events will be emitted, and no further computation will occur. -Not all Writable streams will emit the `'close'` event. +Not all `Writable` streams will emit the `'close'` event. ##### Event: 'drain' -Duplex streams are streams that implement both the [Readable][] and -[Writable][] interfaces. +Duplex streams are streams that implement both the [`Readable`][] and +[`Writable`][] interfaces. -Examples of Duplex streams include: +Examples of `Duplex` streams include: * [TCP sockets][] * [zlib streams][zlib] @@ -1270,11 +1272,11 @@ added: v0.9.4 -Transform streams are [Duplex][] streams where the output is in some way -related to the input. Like all [Duplex][] streams, Transform streams -implement both the [Readable][] and [Writable][] interfaces. +Transform streams are [`Duplex`][] streams where the output is in some way +related to the input. Like all [`Duplex`][] streams, `Transform` streams +implement both the [`Readable`][] and [`Writable`][] interfaces. -Examples of Transform streams include: +Examples of `Transform` streams include: * [zlib streams][zlib] * [crypto streams][crypto] @@ -1436,7 +1438,7 @@ on the type of stream being created, as detailed in the chart below:

       Reading only
-      [Readable](#stream_class_stream_readable)
+      [`Readable`](#stream_class_stream_readable)
       [_read][stream-_read]
@@ -1447,7 +1449,7 @@ on the type of stream being created, as detailed in the chart below:
       Writing only
-      [Writable](#stream_class_stream_writable)
+      [`Writable`](#stream_class_stream_writable)
@@ -1462,7 +1464,7 @@ on the type of stream being created, as detailed in the chart below:
       Reading and writing
-      [Duplex](#stream_class_stream_duplex)
+      [`Duplex`](#stream_class_stream_duplex)
@@ -1477,7 +1479,7 @@ on the type of stream being created, as detailed in the chart below:
       Operate on written data, then read the result
-      [Transform](#stream_class_stream_transform)
+      [`Transform`](#stream_class_stream_transform)
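
Following the chart above, the "Reading only" row corresponds to a sketch like this; the class name and chunk contents are made up:

```js
const { Readable } = require('stream');

// "Reading only": extend Readable and implement _read().
class ThreeChunks extends Readable {
  constructor(options) {
    super(options);
    this.remaining = 3;
  }

  _read() {
    if (this.remaining > 0) {
      this.remaining--;
      this.push(`chunk ${3 - this.remaining}\n`);
    } else {
      this.push(null); // no more data: signal end-of-stream
    }
  }
}

new ThreeChunks().pipe(process.stdout);
```
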
@@ -1516,9 +1518,9 @@ const myWritable = new Writable({ ### Implementing a Writable Stream -The `stream.Writable` class is extended to implement a [Writable][] stream. +The `stream.Writable` class is extended to implement a [`Writable`][] stream. -Custom Writable streams *must* call the `new stream.Writable([options])` +Custom `Writable` streams *must* call the `new stream.Writable([options])` constructor and implement the `writable._write()` method. The `writable._writev()` method *may* also be implemented. @@ -1536,7 +1538,7 @@ changes: [`stream.write()`][stream-write] starts returning `false`. **Default:** `16384` (16kb), or `16` for `objectMode` streams. * `decodeStrings` {boolean} Whether or not to decode strings into - Buffers before passing them to [`stream._write()`][stream-_write]. + `Buffer`s before passing them to [`stream._write()`][stream-_write]. **Default:** `true`. * `objectMode` {boolean} Whether or not the [`stream.write(anyObj)`][stream-write] is a valid operation. When set, @@ -1606,16 +1608,16 @@ const myWritable = new Writable({ * `callback` {Function} Call this function (optionally with an error argument) when processing is complete for the supplied chunk. -All Writable stream implementations must provide a +All `Writable` stream implementations must provide a [`writable._write()`][stream-_write] method to send data to the underlying resource. -[Transform][] streams provide their own implementation of the +[`Transform`][] streams provide their own implementation of the [`writable._write()`][stream-_write]. This function MUST NOT be called by application code directly. It should be -implemented by child classes, and called by the internal Writable class methods -only. +implemented by child classes, and called by the internal `Writable` class +methods only. The `callback` method must be called to signal either that the write completed successfully or failed with an error. The first argument passed to the @@ -1647,8 +1649,8 @@ user programs. argument) to be invoked when processing is complete for the supplied chunks. This function MUST NOT be called by application code directly. It should be -implemented by child classes, and called by the internal Writable class methods -only. +implemented by child classes, and called by the internal `Writable` class +methods only. The `writable._writev()` method may be implemented in addition to `writable._write()` in stream implementations that are capable of processing @@ -1680,7 +1682,7 @@ added: v8.0.0 argument) when finished writing any remaining data. The `_final()` method **must not** be called directly. It may be implemented -by child classes, and if so, will be called by the internal Writable +by child classes, and if so, will be called by the internal `Writable` class methods only. This optional function will be called before the stream closes, delaying the @@ -1692,13 +1694,13 @@ or write buffered data before a stream ends. It is recommended that errors occurring during the processing of the `writable._write()` and `writable._writev()` methods are reported by invoking the callback and passing the error as the first argument. This will cause an -`'error'` event to be emitted by the Writable. Throwing an Error from within +`'error'` event to be emitted by the `Writable`. Throwing an `Error` from within `writable._write()` can result in unexpected and inconsistent behavior depending on how the stream is being used. Using the callback ensures consistent and predictable handling of errors. 
-If a Readable stream pipes into a Writable stream when Writable emits an -error, the Readable stream will be unpiped. +If a `Readable` stream pipes into a `Writable` stream when `Writable` emits an +error, the `Readable` stream will be unpiped. ```js const { Writable } = require('stream'); @@ -1717,9 +1719,9 @@ const myWritable = new Writable({ #### An Example Writable Stream The following illustrates a rather simplistic (and somewhat pointless) custom -Writable stream implementation. While this specific Writable stream instance +`Writable` stream implementation. While this specific `Writable` stream instance is not of any real particular usefulness, the example illustrates each of the -required elements of a custom [Writable][] stream instance: +required elements of a custom [`Writable`][] stream instance: ```js const { Writable } = require('stream'); @@ -1745,7 +1747,7 @@ class MyWritable extends Writable { Decoding buffers is a common task, for instance, when using transformers whose input is a string. This is not a trivial process when using multi-byte characters encoding, such as UTF-8. The following example shows how to decode -multi-byte strings using `StringDecoder` and [Writable][]. +multi-byte strings using `StringDecoder` and [`Writable`][]. ```js const { Writable } = require('stream'); @@ -1782,9 +1784,9 @@ console.log(w.data); // currency: € ### Implementing a Readable Stream -The `stream.Readable` class is extended to implement a [Readable][] stream. +The `stream.Readable` class is extended to implement a [`Readable`][] stream. -Custom Readable streams *must* call the `new stream.Readable([options])` +Custom `Readable` streams *must* call the `new stream.Readable([options])` constructor and implement the `readable._read()` method. #### new stream.Readable([options]) @@ -1797,7 +1799,7 @@ constructor and implement the `readable._read()` method. strings using the specified encoding. **Default:** `null`. * `objectMode` {boolean} Whether this stream should behave as a stream of objects. Meaning that [`stream.read(n)`][stream-read] returns - a single value instead of a Buffer of size n. **Default:** `false`. + a single value instead of a `Buffer` of size `n`. **Default:** `false`. * `read` {Function} Implementation for the [`stream._read()`][stream-_read] method. * `destroy` {Function} Implementation for the @@ -1847,16 +1849,16 @@ added: v0.9.4 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/17979 - description: call _read() only once per microtick + description: call `_read()` only once per microtick --> * `size` {number} Number of bytes to read asynchronously This function MUST NOT be called by application code directly. It should be -implemented by child classes, and called by the internal Readable class methods -only. +implemented by child classes, and called by the internal `Readable` class +methods only. -All Readable stream implementations must provide an implementation of the +All `Readable` stream implementations must provide an implementation of the `readable._read()` method to fetch data from the underlying resource. When `readable._read()` is called, if data is available from the resource, the @@ -1906,7 +1908,7 @@ changes: string, `Buffer` or `Uint8Array`. For object mode streams, `chunk` may be any JavaScript value. * `encoding` {string} Encoding of string chunks. Must be a valid - Buffer encoding, such as `'utf8'` or `'ascii'` + `Buffer` encoding, such as `'utf8'` or `'ascii'`. 
* Returns: {boolean} `true` if additional chunks of data may continued to be pushed; `false` otherwise. @@ -1915,18 +1917,18 @@ be added to the internal queue for users of the stream to consume. Passing `chunk` as `null` signals the end of the stream (EOF), after which no more data can be written. -When the Readable is operating in paused mode, the data added with +When the `Readable` is operating in paused mode, the data added with `readable.push()` can be read out by calling the [`readable.read()`][stream-read] method when the [`'readable'`][] event is emitted. -When the Readable is operating in flowing mode, the data added with +When the `Readable` is operating in flowing mode, the data added with `readable.push()` will be delivered by emitting a `'data'` event. The `readable.push()` method is designed to be as flexible as possible. For example, when wrapping a lower-level source that provides some form of pause/resume mechanism, and a data callback, the low-level source can be wrapped -by the custom Readable instance as illustrated in the following example: +by the custom `Readable` instance as illustrated in the following example: ```js // source is an object with readStop() and readStart() methods, @@ -1959,8 +1961,8 @@ class SourceWrapper extends Readable { } ``` -The `readable.push()` method is intended be called only by Readable -Implementers, and only from within the `readable._read()` method. +The `readable.push()` method is intended be called only by `Readable` +implementers, and only from within the `readable._read()` method. For streams not operating in object mode, if the `chunk` parameter of `readable.push()` is `undefined`, it will be treated as empty string or @@ -1970,7 +1972,7 @@ buffer. See [`readable.push('')`][] for more information. It is recommended that errors occurring during the processing of the `readable._read()` method are emitted using the `'error'` event rather than -being thrown. Throwing an Error from within `readable._read()` can result in +being thrown. Throwing an `Error` from within `readable._read()` can result in unexpected and inconsistent behavior depending on whether the stream is operating in flowing or paused mode. Using the `'error'` event ensures consistent and predictable handling of errors. @@ -1994,7 +1996,7 @@ const myReadable = new Readable({ -The following is a basic example of a Readable stream that emits the numerals +The following is a basic example of a `Readable` stream that emits the numerals from 1 to 1,000,000 in ascending order, and then ends. ```js @@ -2022,11 +2024,11 @@ class Counter extends Readable { ### Implementing a Duplex Stream -A [Duplex][] stream is one that implements both [Readable][] and [Writable][], -such as a TCP socket connection. +A [`Duplex`][] stream is one that implements both [`Readable`][] and +[`Writable`][], such as a TCP socket connection. Because JavaScript does not have support for multiple inheritance, the -`stream.Duplex` class is extended to implement a [Duplex][] stream (as opposed +`stream.Duplex` class is extended to implement a [`Duplex`][] stream (as opposed to extending the `stream.Readable` *and* `stream.Writable` classes). The `stream.Duplex` class prototypically inherits from `stream.Readable` and @@ -2034,7 +2036,7 @@ parasitically from `stream.Writable`, but `instanceof` will work properly for both base classes due to overriding [`Symbol.hasInstance`][] on `stream.Writable`. 
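
The `instanceof` behavior described in the hunk above can be checked with a do-nothing `Duplex` sketch:

```js
const { Duplex, Readable, Writable } = require('stream');

const d = new Duplex({
  read() {},
  write(chunk, encoding, callback) { callback(); }
});

// Duplex inherits prototypically from Readable, and the Symbol.hasInstance
// override on Writable makes the second check pass as well.
console.log(d instanceof Readable); // true
console.log(d instanceof Writable); // true
```
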
-Custom Duplex streams *must* call the `new stream.Duplex([options])` +Custom `Duplex` streams *must* call the `new stream.Duplex([options])` constructor and implement *both* the `readable._read()` and `writable._write()` methods. @@ -2047,7 +2049,7 @@ changes: are supported now. --> -* `options` {Object} Passed to both Writable and Readable +* `options` {Object} Passed to both `Writable` and `Readable` constructors. Also has the following fields: * `allowHalfOpen` {boolean} If set to `false`, then the stream will automatically end the writable side when the readable side ends. @@ -2103,13 +2105,13 @@ const myDuplex = new Duplex({ #### An Example Duplex Stream -The following illustrates a simple example of a Duplex stream that wraps a +The following illustrates a simple example of a `Duplex` stream that wraps a hypothetical lower-level source object to which data can be written, and from which data can be read, albeit using an API that is not compatible with Node.js streams. -The following illustrates a simple example of a Duplex stream that buffers -incoming written data via the [Writable][] interface that is read back out -via the [Readable][] interface. +The following illustrates a simple example of a `Duplex` stream that buffers +incoming written data via the [`Writable`][] interface that is read back out +via the [`Readable`][] interface. ```js const { Duplex } = require('stream'); @@ -2137,20 +2139,20 @@ class MyDuplex extends Duplex { } ``` -The most important aspect of a Duplex stream is that the Readable and Writable -sides operate independently of one another despite co-existing within a single -object instance. +The most important aspect of a `Duplex` stream is that the `Readable` and +`Writable` sides operate independently of one another despite co-existing within +a single object instance. #### Object Mode Duplex Streams -For Duplex streams, `objectMode` can be set exclusively for either the Readable -or Writable side using the `readableObjectMode` and `writableObjectMode` options -respectively. +For `Duplex` streams, `objectMode` can be set exclusively for either the +`Readable` or `Writable` side using the `readableObjectMode` and +`writableObjectMode` options respectively. -In the following example, for instance, a new Transform stream (which is a -type of [Duplex][] stream) is created that has an object mode Writable side +In the following example, for instance, a new `Transform` stream (which is a +type of [`Duplex`][] stream) is created that has an object mode `Writable` side that accepts JavaScript numbers that are converted to hexadecimal strings on -the Readable side. +the `Readable` side. ```js const { Transform } = require('stream'); @@ -2184,31 +2186,31 @@ myTransform.write(100); ### Implementing a Transform Stream -A [Transform][] stream is a [Duplex][] stream where the output is computed +A [`Transform`][] stream is a [`Duplex`][] stream where the output is computed in some way from the input. Examples include [zlib][] streams or [crypto][] streams that compress, encrypt, or decrypt data. There is no requirement that the output be the same size as the input, the same -number of chunks, or arrive at the same time. For example, a Hash stream will +number of chunks, or arrive at the same time. For example, a `Hash` stream will only ever have a single chunk of output which is provided when the input is ended. A `zlib` stream will produce output that is either much smaller or much larger than its input. 
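
As a minimal illustration of a `Transform` whose output is computed from its input, a sketch that upper-cases whatever passes through; input and output happen to be the same size here, but need not be in general:

```js
const { Transform } = require('stream');

const upperCase = new Transform({
  transform(chunk, encoding, callback) {
    // Passing the result to the callback pushes it to the Readable side.
    callback(null, chunk.toString().toUpperCase());
  }
});

process.stdin.pipe(upperCase).pipe(process.stdout);
```
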
-The `stream.Transform` class is extended to implement a [Transform][] stream. +The `stream.Transform` class is extended to implement a [`Transform`][] stream. The `stream.Transform` class prototypically inherits from `stream.Duplex` and implements its own versions of the `writable._write()` and `readable._read()` -methods. Custom Transform implementations *must* implement the +methods. Custom `Transform` implementations *must* implement the [`transform._transform()`][stream-_transform] method and *may* also implement the [`transform._flush()`][stream-_flush] method. -Care must be taken when using Transform streams in that data written to the -stream can cause the Writable side of the stream to become paused if the output -on the Readable side is not consumed. +Care must be taken when using `Transform` streams in that data written to the +stream can cause the `Writable` side of the stream to become paused if the +output on the `Readable` side is not consumed. #### new stream.Transform([options]) -* `options` {Object} Passed to both Writable and Readable +* `options` {Object} Passed to both `Writable` and `Readable` constructors. Also has the following fields: * `transform` {Function} Implementation for the [`stream._transform()`][stream-_transform] method. @@ -2267,8 +2269,8 @@ after all data has been output, which occurs after the callback in argument and data) to be called when remaining data has been flushed. This function MUST NOT be called by application code directly. It should be -implemented by child classes, and called by the internal Readable class methods -only. +implemented by child classes, and called by the internal `Readable` class +methods only. In some cases, a transform operation may need to emit an additional bit of data at the end of the stream. For example, a `zlib` compression stream will @@ -2276,10 +2278,10 @@ store an amount of internal state used to optimally compress the output. When the stream ends, however, that additional data needs to be flushed so that the compressed data will be complete. -Custom [Transform][] implementations *may* implement the `transform._flush()` +Custom [`Transform`][] implementations *may* implement the `transform._flush()` method. This will be called when there is no more written data to be consumed, but before the [`'end'`][] event is emitted signaling the end of the -[Readable][] stream. +[`Readable`][] stream. Within the `transform._flush()` implementation, the `readable.push()` method may be called zero or more times, as appropriate. The `callback` function must @@ -2302,10 +2304,10 @@ user programs. processed. This function MUST NOT be called by application code directly. It should be -implemented by child classes, and called by the internal Readable class methods -only. +implemented by child classes, and called by the internal `Readable` class +methods only. -All Transform stream implementations must provide a `_transform()` +All `Transform` stream implementations must provide a `_transform()` method to accept input and produce output. The `transform._transform()` implementation handles the bytes being written, computes an output, then passes that output off to the readable portion using the `readable.push()` method. @@ -2343,7 +2345,7 @@ called, either synchronously or asynchronously. 
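
A sketch combining `transform._transform()` and `transform._flush()`: a hypothetical line splitter that buffers partial lines and pushes the leftover piece once the writable side ends:

```js
const { Transform } = require('stream');

class LineSplitter extends Transform {
  constructor(options) {
    super(options);
    this.tail = '';
  }

  _transform(chunk, encoding, callback) {
    const lines = (this.tail + chunk).split('\n');
    this.tail = lines.pop(); // keep the trailing partial line for later
    for (const line of lines)
      this.push(line);
    callback();
  }

  _flush(callback) {
    // Emit whatever is left once no more data will be written.
    if (this.tail !== '')
      this.push(this.tail);
    callback();
  }
}

const splitter = new LineSplitter();
splitter.setEncoding('utf8');
splitter.on('data', (line) => console.log(`line: ${line}`));
splitter.end('first\nsecond\nthird');
```
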
#### Class: stream.PassThrough -The `stream.PassThrough` class is a trivial implementation of a [Transform][] +The `stream.PassThrough` class is a trivial implementation of a [`Transform`][] stream that simply passes the input bytes across to the output. Its purpose is primarily for examples and testing, but there are some use cases where `stream.PassThrough` is useful as a building block for novel sorts of streams. @@ -2356,7 +2358,7 @@ primarily for examples and testing, but there are some use cases where -In versions of Node.js prior to v0.10, the Readable stream interface was +In versions of Node.js prior to v0.10, the `Readable` stream interface was simpler, but also less powerful and less useful. * Rather than waiting for calls the [`stream.read()`][stream-read] method, @@ -2367,9 +2369,9 @@ simpler, but also less powerful and less useful. guaranteed. This meant that it was still necessary to be prepared to receive [`'data'`][] events *even when the stream was in a paused state*. -In Node.js v0.10, the [Readable][] class was added. For backwards compatibility -with older Node.js programs, Readable streams switch into "flowing mode" when a -[`'data'`][] event handler is added, or when the +In Node.js v0.10, the [`Readable`][] class was added. For backwards +compatibility with older Node.js programs, `Readable` streams switch into +"flowing mode" when a [`'data'`][] event handler is added, or when the [`stream.resume()`][stream-resume] method is called. The effect is that, even when not using the new [`stream.read()`][stream-read] method and [`'readable'`][] event, it is no longer necessary to worry about losing @@ -2416,8 +2418,8 @@ net.createServer((socket) => { }).listen(1337); ``` -In addition to new Readable streams switching into flowing mode, -pre-v0.10 style streams can be wrapped in a Readable class using the +In addition to new `Readable` streams switching into flowing mode, +pre-v0.10 style streams can be wrapped in a `Readable` class using the [`readable.wrap()`][`stream.wrap()`] method. ### `readable.read(0)` @@ -2433,7 +2435,7 @@ a low-level [`stream._read()`][stream-_read] call. While most applications will almost never need to do this, there are situations within Node.js where this is done, particularly in the -Readable stream class internals. +`Readable` stream class internals. ### `readable.push('')` @@ -2483,13 +2485,13 @@ contain multi-byte characters. 
[API for Stream Consumers]: #stream_api_for_stream_consumers [API for Stream Implementers]: #stream_api_for_stream_implementers [Compatibility]: #stream_compatibility_with_older_node_js_versions -[Duplex]: #stream_class_stream_duplex +[`Duplex`]: #stream_class_stream_duplex [HTTP requests, on the client]: http.html#http_class_http_clientrequest [HTTP responses, on the server]: http.html#http_class_http_serverresponse -[Readable]: #stream_class_stream_readable +[`Readable`]: #stream_class_stream_readable [TCP sockets]: net.html#net_class_net_socket -[Transform]: #stream_class_stream_transform -[Writable]: #stream_class_stream_writable +[`Transform`]: #stream_class_stream_transform +[`Writable`]: #stream_class_stream_writable [child process stdin]: child_process.html#child_process_subprocess_stdin [child process stdout and stderr]: child_process.html#child_process_subprocess_stdout [crypto]: crypto.html diff --git a/doc/api/tls.md b/doc/api/tls.md index bdda8bd7343873..e22286adb45ad3 100644 --- a/doc/api/tls.md +++ b/doc/api/tls.md @@ -422,8 +422,7 @@ added: v3.0.0 Updates the keys for encryption/decryption of the [TLS Session Tickets][]. The key's `Buffer` should be 48 bytes long. See `ticketKeys` option in -[tls.createServer](#tls_tls_createserver_options_secureconnectionlistener) for -more information on how it is used. +[`tls.createServer()`] for more information on how it is used. Changes to the ticket keys are effective only for future server connections. Existing or currently pending server connections will use the previous keys. @@ -582,7 +581,7 @@ an ephemeral key exchange in [Perfect Forward Secrecy][] on a client connection. It returns an empty object when the key exchange is not ephemeral. As this is only supported on a client socket; `null` is returned if called on a server socket. The supported types are `'DH'` and `'ECDH'`. The -`name` property is available only when type is 'ECDH'. +`name` property is available only when type is `'ECDH'`. For example: `{ type: 'ECDH', name: 'prime256v1', size: 256 }`. @@ -615,7 +614,7 @@ added: v0.11.4 Returns an object representing the peer's certificate. The returned object has some properties corresponding to the fields of the certificate. -If the full certificate chain was requested, each certificate will include a +If the full certificate chain was requested, each certificate will include an `issuerCertificate` property containing an object representing its issuer's certificate. @@ -637,7 +636,7 @@ For example: OU: 'Test TLS Certificate', CN: 'localhost' }, issuerCertificate: - { ... another certificate, possibly with a .issuerCertificate ... }, + { ... another certificate, possibly with an .issuerCertificate ... }, raw: < RAW DER buffer >, pubkey: < RAW DER buffer >, valid_from: 'Nov 11 09:52:22 2009 GMT', @@ -1016,7 +1015,7 @@ changes: - version: v7.3.0 pr-url: https://github.com/nodejs/node/pull/10294 description: If the `key` option is an array, individual entries do not - need a `passphrase` property anymore. Array entries can also + need a `passphrase` property anymore. `Array` entries can also just be `string`s or `Buffer`s now. - version: v5.2.0 pr-url: https://github.com/nodejs/node/pull/4099 @@ -1056,9 +1055,9 @@ changes: * `ca` {string|string[]|Buffer|Buffer[]} Optionally override the trusted CA certificates. Default is to trust the well-known CAs curated by Mozilla. Mozilla's CAs are completely replaced when CAs are explicitly specified - using this option. 
The value can be a string or Buffer, or an Array of - strings and/or Buffers. Any string or Buffer can contain multiple PEM CAs - concatenated together. The peer's certificate must be chainable to a CA + using this option. The value can be a string or `Buffer`, or an `Array` of + strings and/or `Buffer`s. Any string or `Buffer` can contain multiple PEM + CAs concatenated together. The peer's certificate must be chainable to a CA trusted by the server for the connection to be authenticated. When using certificates that are not chainable to a well-known CA, the certificate's CA must be explicitly specified as a trusted or the connection will fail to @@ -1156,12 +1155,12 @@ changes: * `SNICallback(servername, cb)` {Function} A function that will be called if the client supports SNI TLS extension. Two arguments will be passed when called: `servername` and `cb`. `SNICallback` should invoke `cb(null, ctx)`, - where `ctx` is a SecureContext instance. (`tls.createSecureContext(...)` can - be used to get a proper SecureContext.) If `SNICallback` wasn't provided the - default callback with high-level API will be used (see below). + where `ctx` is a `SecureContext` instance. (`tls.createSecureContext(...)` + can be used to get a proper `SecureContext`.) If `SNICallback` wasn't + provided the default callback with high-level API will be used (see below). * `sessionTimeout` {number} An integer specifying the number of seconds after which the TLS session identifiers and TLS session tickets created by the - server will time out. See [SSL_CTX_set_timeout] for more details. + server will time out. See [`SSL_CTX_set_timeout`] for more details. * `ticketKeys`: A 48-byte `Buffer` instance consisting of a 16-byte prefix, a 16-byte HMAC key, and a 16-byte AES key. This can be used to accept TLS session tickets on multiple instances of the TLS server. @@ -1169,7 +1168,7 @@ changes: servers, the identity options (`pfx` or `key`/`cert`) are usually required. * `secureConnectionListener` {Function} -Creates a new [tls.Server][]. The `secureConnectionListener`, if provided, is +Creates a new [`tls.Server`][]. The `secureConnectionListener`, if provided, is automatically set as a listener for the [`'secureConnection'`][] event. The `ticketKeys` options is automatically shared between `cluster` module @@ -1371,13 +1370,16 @@ where `secureSocket` has the same API as `pair.cleartext`. [`'secureConnect'`]: #tls_event_secureconnect [`'secureConnection'`]: #tls_event_secureconnection +[`SSL_CTX_set_timeout`]: https://www.openssl.org/docs/man1.1.0/ssl/SSL_CTX_set_timeout.html [`crypto.getCurves()`]: crypto.html#crypto_crypto_getcurves +[`dns.lookup()`]: dns.html#dns_dns_lookup_hostname_options_callback [`net.Server.address()`]: net.html#net_server_address [`net.Server`]: net.html#net_class_net_server [`net.Socket`]: net.html#net_class_net_socket [`server.getConnections()`]: net.html#net_server_getconnections_callback [`server.listen()`]: net.html#net_server_listen [`tls.DEFAULT_ECDH_CURVE`]: #tls_tls_default_ecdh_curve +[`tls.Server`]: #tls_class_tls_server [`tls.TLSSocket.getPeerCertificate()`]: #tls_tlssocket_getpeercertificate_detailed [`tls.TLSSocket`]: #tls_class_tls_tlssocket [`tls.connect()`]: #tls_tls_connect_options_callback @@ -1392,7 +1394,7 @@ where `secureSocket` has the same API as `pair.cleartext`. 
[OpenSSL Options]: crypto.html#crypto_openssl_options [OpenSSL cipher list format documentation]: https://www.openssl.org/docs/man1.1.0/apps/ciphers.html#CIPHER-LIST-FORMAT [Perfect Forward Secrecy]: #tls_perfect_forward_secrecy -[SSL_CTX_set_timeout]: https://www.openssl.org/docs/man1.1.0/ssl/SSL_CTX_set_timeout.html +[RFC 5929]: https://tools.ietf.org/html/rfc5929 [SSL_METHODS]: https://www.openssl.org/docs/man1.1.0/ssl/ssl.html#Dealing-with-Protocol-Methods [Stream]: stream.html#stream_stream [TLS Session Tickets]: https://www.ietf.org/rfc/rfc5077.txt @@ -1400,6 +1402,3 @@ where `secureSocket` has the same API as `pair.cleartext`. [asn1.js]: https://npmjs.org/package/asn1.js [modifying the default cipher suite]: #tls_modifying_the_default_tls_cipher_suite [specific attacks affecting larger AES key sizes]: https://www.schneier.com/blog/archives/2009/07/another_new_aes.html -[tls.Server]: #tls_class_tls_server -[`dns.lookup()`]: dns.html#dns_dns_lookup_hostname_options_callback -[RFC 5929]: https://tools.ietf.org/html/rfc5929 diff --git a/doc/api/tracing.md b/doc/api/tracing.md index f83e808dc89220..228d3a6678482c 100644 --- a/doc/api/tracing.md +++ b/doc/api/tracing.md @@ -8,14 +8,14 @@ Trace Event provides a mechanism to centralize tracing information generated by V8, Node.js core, and userspace code. Tracing can be enabled with the `--trace-event-categories` command-line flag -or by using the trace_events module. The `--trace-event-categories` flag accepts -a list of comma-separated category names. +or by using the `trace_events` module. The `--trace-event-categories` flag +accepts a list of comma-separated category names. The available categories are: * `node` - An empty placeholder. -* `node.async_hooks` - Enables capture of detailed [async_hooks] trace data. - The [async_hooks] events have a unique `asyncId` and a special triggerId +* `node.async_hooks` - Enables capture of detailed [`async_hooks`] trace data. + The [`async_hooks`] events have a unique `asyncId` and a special `triggerId` `triggerAsyncId` property. * `node.bootstrap` - Enables capture of Node.js bootstrap milestones. * `node.perf` - Enables capture of [Performance API] measurements. @@ -23,6 +23,7 @@ The available categories are: measures and marks. * `node.perf.timerify` - Enables capture of only Performance API timerify measurements. +* `node.fs.sync` - Enables capture of trace data for file system sync methods. * `v8` - The [V8] events are GC, compiling, and execution related. By default the `node`, `node.async_hooks`, and `v8` categories are enabled. @@ -196,4 +197,4 @@ console.log(trace_events.getEnabledCategories()); [Performance API]: perf_hooks.html [V8]: v8.html -[async_hooks]: async_hooks.html +[`async_hooks`]: async_hooks.html diff --git a/doc/api/tty.md b/doc/api/tty.md index f8bc4feec3e86d..91bca8284d9378 100644 --- a/doc/api/tty.md +++ b/doc/api/tty.md @@ -126,15 +126,15 @@ is updated whenever the `'resize'` event is emitted. added: v9.9.0 --> -* `env` {Object} A object containing the environment variables to check. +* `env` {Object} An object containing the environment variables to check. **Default:** `process.env`. * Returns: {number} Returns: -* 1 for 2, -* 4 for 16, -* 8 for 256, -* 24 for 16,777,216 +* `1` for 2, +* `4` for 16, +* `8` for 256, +* `24` for 16,777,216 colors supported. Use this to determine what colors the terminal supports. 
Due to the nature of diff --git a/doc/api/url.md b/doc/api/url.md index a7add464e8b983..64b7b444c54ffd 100644 --- a/doc/api/url.md +++ b/doc/api/url.md @@ -86,8 +86,8 @@ The `URL` class is also available on the global object. In accordance with browser conventions, all properties of `URL` objects are implemented as getters and setters on the class prototype, rather than as -data properties on the object itself. Thus, unlike [legacy urlObject][]s, using -the `delete` keyword on any properties of `URL` objects (e.g. `delete +data properties on the object itself. Thus, unlike [legacy `urlObject`][]s, +using the `delete` keyword on any properties of `URL` objects (e.g. `delete myURL.protocol`, `delete myURL.pathname`, etc) has no effect but will still return `true`. @@ -346,7 +346,7 @@ console.log(myURL.port); // Prints 1234 ``` -The port value may be set as either a number or as a String containing a number +The port value may be set as either a number or as a string containing a number in the range `0` to `65535` (inclusive). Setting the value to the default port of the `URL` objects given `protocol` will result in the `port` value becoming the empty string (`''`). @@ -581,7 +581,7 @@ added: v7.10.0 * `iterable` {Iterable} An iterable object whose elements are key-value pairs Instantiate a new `URLSearchParams` object with an iterable map in a way that -is similar to [`Map`][]'s constructor. `iterable` can be an Array or any +is similar to [`Map`][]'s constructor. `iterable` can be an `Array` or any iterable object. That means `iterable` can be another `URLSearchParams`, in which case the constructor will simply create a clone of the provided `URLSearchParams`. Elements of `iterable` are key-value pairs, and can @@ -644,16 +644,16 @@ Remove all name-value pairs whose name is `name`. * Returns: {Iterator} -Returns an ES6 Iterator over each of the name-value pairs in the query. -Each item of the iterator is a JavaScript Array. The first item of the Array -is the `name`, the second item of the Array is the `value`. +Returns an ES6 `Iterator` over each of the name-value pairs in the query. +Each item of the iterator is a JavaScript `Array`. The first item of the `Array` +is the `name`, the second item of the `Array` is the `value`. Alias for [`urlSearchParams[@@iterator]()`][`urlSearchParams@@iterator()`]. #### urlSearchParams.forEach(fn[, thisArg]) -* `fn` {Function} Function invoked for each name-value pair in the query. -* `thisArg` {Object} Object to be used as `this` value for when `fn` is called +* `fn` {Function} Invoked for each name-value pair in the query +* `thisArg` {Object} To be used as `this` value for when `fn` is called Iterates over each name-value pair in the query and invokes the given function. @@ -695,7 +695,7 @@ Returns `true` if there is at least one name-value pair whose name is `name`. * Returns: {Iterator} -Returns an ES6 Iterator over the names of each name-value pair. +Returns an ES6 `Iterator` over the names of each name-value pair. ```js const params = new URLSearchParams('foo=bar&foo=baz'); @@ -760,15 +760,15 @@ percent-encoded where necessary. * Returns: {Iterator} -Returns an ES6 Iterator over the values of each name-value pair. +Returns an ES6 `Iterator` over the values of each name-value pair. #### urlSearchParams\[Symbol.iterator\]() * Returns: {Iterator} -Returns an ES6 Iterator over each of the name-value pairs in the query string. -Each item of the iterator is a JavaScript Array. 
The first item of the Array -is the `name`, the second item of the Array is the `value`. +Returns an ES6 `Iterator` over each of the name-value pairs in the query string. +Each item of the iterator is a JavaScript `Array`. The first item of the `Array` +is the `name`, the second item of the `Array` is the `value`. Alias for [`urlSearchParams.entries()`][]. @@ -846,7 +846,7 @@ added: v7.6.0 Punycode encoded. **Default:** `false`. * Returns: {string} -Returns a customizable serialization of a URL String representation of a +Returns a customizable serialization of a URL `String` representation of a [WHATWG URL][] object. The URL object has both a `toString()` method and `href` property that return @@ -871,9 +871,9 @@ console.log(url.format(myURL, { fragment: false, unicode: true, auth: false })); ## Legacy URL API -### Legacy urlObject +### Legacy `urlObject` -The legacy urlObject (`require('url').Url`) is created and returned by the +The legacy `urlObject` (`require('url').Url`) is created and returned by the `url.parse()` function. #### urlObject.auth @@ -1039,7 +1039,7 @@ The formatting process operates as follows: `urlObject.host` is coerced to a string and appended to `result`. * If the `urlObject.pathname` property is a string that is not an empty string: * If the `urlObject.pathname` *does not start* with an ASCII forward slash - (`/`), then the literal string '/' is appended to `result`. + (`/`), then the literal string `'/'` is appended to `result`. * The value of `urlObject.pathname` is appended to `result`. * Otherwise, if `urlObject.pathname` is not `undefined` and is not a string, an [`Error`][] is thrown. @@ -1205,6 +1205,6 @@ console.log(myURL.origin); [WHATWG URL Standard]: https://url.spec.whatwg.org/ [WHATWG URL]: #url_the_whatwg_url_api [examples of parsed URLs]: https://url.spec.whatwg.org/#example-url-parsing -[legacy urlObject]: #url_legacy_urlobject +[legacy `urlObject`]: #url_legacy_urlobject [percent-encoded]: #whatwg-percent-encoding [stable sorting algorithm]: https://en.wikipedia.org/wiki/Sorting_algorithm#Stability diff --git a/doc/api/util.md b/doc/api/util.md index c91bea6549d6a7..3b0d2a99fc88a2 100644 --- a/doc/api/util.md +++ b/doc/api/util.md @@ -20,10 +20,10 @@ added: v8.2.0 * `original` {Function} An `async` function * Returns: {Function} a callback style function -Takes an `async` function (or a function that returns a Promise) and returns a +Takes an `async` function (or a function that returns a `Promise`) and returns a function following the error-first callback style, i.e. taking -a `(err, value) => ...` callback as the last argument. In the callback, the -first argument will be the rejection reason (or `null` if the Promise +an `(err, value) => ...` callback as the last argument. In the callback, the +first argument will be the rejection reason (or `null` if the `Promise` resolved), and the second argument will be the resolved value. ```js @@ -197,18 +197,18 @@ The first argument is a string containing zero or more *placeholder* tokens. Each placeholder token is replaced with the converted value from the corresponding argument. Supported placeholders are: -* `%s` - String. -* `%d` - Number (integer or floating point value). +* `%s` - `String`. +* `%d` - `Number` (integer or floating point value). * `%i` - Integer. * `%f` - Floating point value. * `%j` - JSON. Replaced with the string `'[Circular]'` if the argument contains circular references. -* `%o` - Object. A string representation of an object +* `%o` - `Object`. 
A string representation of an object with generic JavaScript object formatting. Similar to `util.inspect()` with options `{ showHidden: true, showProxy: true }`. This will show the full object including non-enumerable properties and proxies. -* `%O` - Object. A string representation of an object with generic JavaScript +* `%O` - `Object`. A string representation of an object with generic JavaScript object formatting. Similar to `util.inspect()` without options. This will show the full object not including non-enumerable properties and proxies. * `%%` - single percent sign (`'%'`). This does not consume an argument. @@ -362,7 +362,7 @@ added: v0.3.0 changes: - version: v10.0.0 pr-url: https://github.com/nodejs/node/pull/19259 - description: WeakMap and WeakSet entries can now be inspected as well. + description: The `WeakMap` and `WeakSet` entries can now be inspected as well. - version: v9.9.0 pr-url: https://github.com/nodejs/node/pull/17576 description: The `compact` option is supported now. @@ -381,7 +381,7 @@ changes: description: The `showProxy` option is supported now. --> -* `object` {any} Any JavaScript primitive or Object. +* `object` {any} Any JavaScript primitive or `Object`. * `options` {Object} * `showHidden` {boolean} If `true`, the `object`'s non-enumerable symbols and properties will be included in the formatted result as well as [`WeakMap`][] @@ -623,7 +623,7 @@ util.inspect(obj); added: v6.6.0 --> -A Symbol that can be used to declare custom inspect functions, see +A {symbol} that can be used to declare custom inspect functions, see [Custom inspection functions on Objects][]. ### util.inspect.defaultOptions @@ -670,7 +670,7 @@ added: v8.0.0 * Returns: {Function} Takes a function following the common error-first callback style, i.e. taking -a `(err, value) => ...` callback as the last argument, and returns a version +an `(err, value) => ...` callback as the last argument, and returns a version that returns promises. ```js @@ -752,7 +752,7 @@ added: v8.0.0 * {symbol} -A Symbol that can be used to declare custom promisified variants of functions, +A {symbol} that can be used to declare custom promisified variants of functions, see [Custom promisified functions][]. ## Class: util.TextDecoder @@ -859,7 +859,7 @@ supported encodings or an alias. ### textDecoder.decode([input[, options]]) * `input` {ArrayBuffer|DataView|TypedArray} An `ArrayBuffer`, `DataView` or - Typed Array instance containing the encoded data. + `Typed Array` instance containing the encoded data. * `options` {Object} * `stream` {boolean} `true` if additional chunks of data are expected. **Default:** `false`. diff --git a/doc/api/vm.md b/doc/api/vm.md index 68b25b6aa32d23..9e1249dc4ed8bb 100644 --- a/doc/api/vm.md +++ b/doc/api/vm.md @@ -162,20 +162,20 @@ const contextifiedSandbox = vm.createContext({ secret: 42 }); * `url` {string} URL used in module resolution and stack traces. **Default:** `'vm:module(i)'` where `i` is a context-specific ascending index. * `context` {Object} The [contextified][] object as returned by the - `vm.createContext()` method, to compile and evaluate this Module in. + `vm.createContext()` method, to compile and evaluate this `Module` in. * `lineOffset` {integer} Specifies the line number offset that is displayed - in stack traces produced by this Module. - * `columnOffset` {integer} Spcifies the column number offset that is displayed - in stack traces produced by this Module. 
- * `initalizeImportMeta` {Function} Called during evaluation of this Module to - initialize the `import.meta`. This function has the signature `(meta, - module)`, where `meta` is the `import.meta` object in the Module, and + in stack traces produced by this `Module`. + * `columnOffset` {integer} Specifies the column number offset that is + displayed in stack traces produced by this `Module`. + * `initalizeImportMeta` {Function} Called during evaluation of this `Module` + to initialize the `import.meta`. This function has the signature `(meta, + module)`, where `meta` is the `import.meta` object in the `Module`, and `module` is this `vm.Module` object. Creates a new ES `Module` object. *Note*: Properties assigned to the `import.meta` object that are objects may -allow the Module to access information outside the specified `context`, if the +allow the `Module` to access information outside the specified `context`, if the object is created in the top level context. Use `vm.runInContext()` to create objects in a specific context. @@ -217,8 +217,8 @@ const contextifiedSandbox = vm.createContext({ secret: 42 }); The specifiers of all dependencies of this module. The returned array is frozen to disallow any changes to it. -Corresponds to the [[RequestedModules]] field of [Source Text Module Record][]s -in the ECMAScript specification. +Corresponds to the `[[RequestedModules]]` field of +[Source Text Module Record][]s in the ECMAScript specification. ### module.error @@ -231,7 +231,7 @@ accessing this property will result in a thrown exception. The value `undefined` cannot be used for cases where there is not a thrown exception due to possible ambiguity with `throw undefined;`. -Corresponds to the [[EvaluationError]] field of [Source Text Module Record][]s +Corresponds to the `[[EvaluationError]]` field of [Source Text Module Record][]s in the ECMAScript specification. ### module.linkingStatus @@ -246,8 +246,8 @@ The current linking status of `module`. It will be one of the following values: - `'linked'`: `module.link()` has been called, and all its dependencies have been successfully linked. - `'errored'`: `module.link()` has been called, but at least one of its - dependencies failed to link, either because the callback returned a Promise - that is rejected, or because the Module the callback returned is invalid. + dependencies failed to link, either because the callback returned a `Promise` + that is rejected, or because the `Module` the callback returned is invalid. ### module.namespace @@ -289,9 +289,9 @@ The current status of the module. Will be one of: - `'errored'`: The module has been evaluated, but an exception was thrown. Other than `'errored'`, this status string corresponds to the specification's -[Source Text Module Record][]'s [[Status]] field. `'errored'` corresponds to -`'evaluated'` in the specification, but with [[EvaluationError]] set to a value -that is not `undefined`. +[Source Text Module Record][]'s `[[Status]]` field. `'errored'` corresponds to +`'evaluated'` in the specification, but with `[[EvaluationError]]` set to a +value that is not `undefined`. 
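A rough illustration of how `linkingStatus` and `status` progress follows. This is a sketch only, assuming Node.js is started with the `--experimental-vm-modules` flag that gates this API and that the module source has no imports:

```js
const vm = require('vm');

(async () => {
  const module = new vm.Module('export default 42;');
  console.log(module.status);         // expected: 'uninstantiated'
  console.log(module.linkingStatus);  // expected: 'unlinked'

  // With no dependencies, the linker callback is never invoked.
  await module.link(() => { throw new Error('unexpected dependency'); });
  console.log(module.linkingStatus);  // expected: 'linked'

  module.instantiate();
  await module.evaluate();
  console.log(module.status);         // expected: 'evaluated'
})();
```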
### module.url diff --git a/doc/api/zlib.md b/doc/api/zlib.md index 33dbdbef1d741e..0e66abdcfb0766 100644 --- a/doc/api/zlib.md +++ b/doc/api/zlib.md @@ -165,7 +165,7 @@ The memory requirements for deflate are (in bytes): (1 << (windowBits + 2)) + (1 << (memLevel + 9)) ``` -That is: 128K for windowBits = 15 + 128K for memLevel = 8 +That is: 128K for `windowBits` = 15 + 128K for `memLevel` = 8 (default values) plus a few kilobytes for small objects. For example, to reduce the default memory requirements from 256K to 128K, the @@ -178,7 +178,7 @@ const options = { windowBits: 14, memLevel: 7 }; This will, however, generally degrade compression. The memory requirements for inflate are (in bytes) `1 << windowBits`. -That is, 32K for windowBits = 15 (default value) plus a few kilobytes +That is, 32K for `windowBits` = 15 (default value) plus a few kilobytes for small objects. This is in addition to a single internal output slab buffer of size @@ -287,10 +287,10 @@ added: v0.11.1 changes: - version: v9.4.0 pr-url: https://github.com/nodejs/node/pull/16042 - description: The `dictionary` option can be an ArrayBuffer. + description: The `dictionary` option can be an `ArrayBuffer`. - version: v8.0.0 pr-url: https://github.com/nodejs/node/pull/12001 - description: The `dictionary` option can be an Uint8Array now. + description: The `dictionary` option can be an `Uint8Array` now. - version: v5.11.0 pr-url: https://github.com/nodejs/node/pull/6069 description: The `finishFlush` option is supported now. @@ -473,17 +473,17 @@ Provides an object enumerating Zlib-related constants. added: v0.5.8 --> -Creates and returns a new [Deflate][] object with the given [`options`][]. +Creates and returns a new [`Deflate`][] object with the given [`options`][]. ## zlib.createDeflateRaw([options]) -Creates and returns a new [DeflateRaw][] object with the given [`options`][]. +Creates and returns a new [`DeflateRaw`][] object with the given [`options`][]. -An upgrade of zlib from 1.2.8 to 1.2.11 changed behavior when windowBits -is set to 8 for raw deflate streams. zlib would automatically set windowBits +An upgrade of zlib from 1.2.8 to 1.2.11 changed behavior when `windowBits` +is set to 8 for raw deflate streams. zlib would automatically set `windowBits` to 9 if was initially set to 8. Newer versions of zlib will throw an exception, so Node.js restored the original behavior of upgrading a value of 8 to 9, since passing `windowBits = 9` to zlib actually results in a compressed stream @@ -494,35 +494,35 @@ that effectively uses an 8-bit window only. added: v0.5.8 --> -Creates and returns a new [Gunzip][] object with the given [`options`][]. +Creates and returns a new [`Gunzip`][] object with the given [`options`][]. ## zlib.createGzip([options]) -Creates and returns a new [Gzip][] object with the given [`options`][]. +Creates and returns a new [`Gzip`][] object with the given [`options`][]. ## zlib.createInflate([options]) -Creates and returns a new [Inflate][] object with the given [`options`][]. +Creates and returns a new [`Inflate`][] object with the given [`options`][]. ## zlib.createInflateRaw([options]) -Creates and returns a new [InflateRaw][] object with the given [`options`][]. +Creates and returns a new [`InflateRaw`][] object with the given [`options`][]. ## zlib.createUnzip([options]) -Creates and returns a new [Unzip][] object with the given [`options`][]. +Creates and returns a new [`Unzip`][] object with the given [`options`][]. 
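A minimal usage sketch for these factory functions, compressing one stream into another with `zlib.createGzip()` (the file names are placeholders):

```js
const fs = require('fs');
const zlib = require('zlib');

const gzip = zlib.createGzip();
const source = fs.createReadStream('input.txt');        // placeholder input path
const destination = fs.createWriteStream('input.txt.gz');

// Pipe the raw bytes through the Gzip transform stream.
source.pipe(gzip).pipe(destination).on('finish', () => {
  console.log('input.txt.gz written');
});
```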
## Convenience Methods @@ -542,13 +542,13 @@ added: v0.6.0 changes: - version: v9.4.0 pr-url: https://github.com/nodejs/node/pull/16042 - description: The `buffer` parameter can be an ArrayBuffer. + description: The `buffer` parameter can be an `ArrayBuffer`. - version: v8.0.0 pr-url: https://github.com/nodejs/node/pull/12223 - description: The `buffer` parameter can be any TypedArray or DataView now. + description: The `buffer` parameter can be any `TypedArray` or `DataView`. - version: v8.0.0 pr-url: https://github.com/nodejs/node/pull/12001 - description: The `buffer` parameter can be an Uint8Array now. + description: The `buffer` parameter can be an `Uint8Array` now. --> ### zlib.deflateSync(buffer[, options]) - `buffer` {Buffer|TypedArray|DataView|ArrayBuffer|string} -Compress a chunk of data with [Deflate][]. +Compress a chunk of data with [`Deflate`][]. ### zlib.deflateRaw(buffer[, options], callback) ### zlib.deflateRawSync(buffer[, options]) - `buffer` {Buffer|TypedArray|DataView|ArrayBuffer|string} -Compress a chunk of data with [DeflateRaw][]. +Compress a chunk of data with [`DeflateRaw`][]. ### zlib.gunzip(buffer[, options], callback) ### zlib.gunzipSync(buffer[, options]) - `buffer` {Buffer|TypedArray|DataView|ArrayBuffer|string} -Decompress a chunk of data with [Gunzip][]. +Decompress a chunk of data with [`Gunzip`][]. ### zlib.gzip(buffer[, options], callback) ### zlib.gzipSync(buffer[, options]) - `buffer` {Buffer|TypedArray|DataView|ArrayBuffer|string} -Compress a chunk of data with [Gzip][]. +Compress a chunk of data with [`Gzip`][]. ### zlib.inflate(buffer[, options], callback) ### zlib.inflateSync(buffer[, options]) - `buffer` {Buffer|TypedArray|DataView|ArrayBuffer|string} -Decompress a chunk of data with [Inflate][]. +Decompress a chunk of data with [`Inflate`][]. ### zlib.inflateRaw(buffer[, options], callback) ### zlib.inflateRawSync(buffer[, options]) - `buffer` {Buffer|TypedArray|DataView|ArrayBuffer|string} -Decompress a chunk of data with [InflateRaw][]. +Decompress a chunk of data with [`InflateRaw`][]. ### zlib.unzip(buffer[, options], callback) ### zlib.unzipSync(buffer[, options]) - `buffer` {Buffer|TypedArray|DataView|ArrayBuffer|string} -Decompress a chunk of data with [Unzip][]. +Decompress a chunk of data with [`Unzip`][]. [`.flush()`]: #zlib_zlib_flush_kind_callback [`Accept-Encoding`]: https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.3 @@ -770,16 +770,16 @@ Decompress a chunk of data with [Unzip][]. 
[`Buffer`]: buffer.html#buffer_class_buffer [`Content-Encoding`]: https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 [`DataView`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/DataView +[`Deflate`]: #zlib_class_zlib_deflate +[`DeflateRaw`]: #zlib_class_zlib_deflateraw +[`Gunzip`]: #zlib_class_zlib_gunzip +[`Gzip`]: #zlib_class_zlib_gzip +[`Inflate`]: #zlib_class_zlib_inflate +[`InflateRaw`]: #zlib_class_zlib_inflateraw [`TypedArray`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray -[`options`]: #zlib_class_options -[DeflateRaw]: #zlib_class_zlib_deflateraw -[Deflate]: #zlib_class_zlib_deflate -[Gunzip]: #zlib_class_zlib_gunzip -[Gzip]: #zlib_class_zlib_gzip -[InflateRaw]: #zlib_class_zlib_inflateraw -[Inflate]: #zlib_class_zlib_inflate -[Memory Usage Tuning]: #zlib_memory_usage_tuning -[Unzip]: #zlib_class_zlib_unzip [`UV_THREADPOOL_SIZE`]: cli.html#cli_uv_threadpool_size_size +[`Unzip`]: #zlib_class_zlib_unzip +[`options`]: #zlib_class_options [`zlib.bytesWritten`]: #zlib_zlib_byteswritten +[Memory Usage Tuning]: #zlib_memory_usage_tuning [zlib documentation]: https://zlib.net/manual.html#Constants diff --git a/doc/guides/contributing/pull-requests.md b/doc/guides/contributing/pull-requests.md index c248c1b9b6ec6f..0da7022d101ed5 100644 --- a/doc/guides/contributing/pull-requests.md +++ b/doc/guides/contributing/pull-requests.md @@ -35,7 +35,7 @@ so that you can make the actual changes. This is where we will start. * [Getting Approvals for your Pull Request](#getting-approvals-for-your-pull-request) * [CI Testing](#ci-testing) * [Waiting Until the Pull Request Gets Landed](#waiting-until-the-pull-request-gets-landed) - * [Check Out the Collaborator's Guide](#check-out-the-collaborators-guide) + * [Check Out the Collaborator Guide](#check-out-the-collaborator-guide) ## Dependencies @@ -599,9 +599,10 @@ whether the failure was caused by the changes in the Pull Request. ### Commit Squashing -When the commits in your Pull Request land, they may be squashed -into one commit per logical change. Metadata will be added to the commit -message (including links to the Pull Request, links to relevant issues, +In most cases, do not squash commits that you add to your Pull Request during +the review process. When the commits in your Pull Request land, they may be +squashed into one commit per logical change. Metadata will be added to the +commit message (including links to the Pull Request, links to relevant issues, and the names of the reviewers). The commit history of your Pull Request, however, will stay intact on the Pull Request page. @@ -644,17 +645,17 @@ doesn't need to wait. A Pull Request may well take longer to be merged in. All these precautions are important because Node.js is widely used, so don't be discouraged! -### Check Out the Collaborator's Guide +### Check Out the Collaborator Guide -If you want to know more about the code review and the landing process, -you can take a look at the -[collaborator's guide](https://github.com/nodejs/node/blob/master/COLLABORATOR_GUIDE.md). +If you want to know more about the code review and the landing process, see the +[Collaborator Guide][]. 
[approved]: #getting-approvals-for-your-pull-request [benchmark results]: ../writing-and-running-benchmarks.md [Building guide]: ../../../BUILDING.md [CI (Continuous Integration) test run]: #ci-testing [Code of Conduct]: https://github.com/nodejs/admin/blob/master/CODE_OF_CONDUCT.md +[Collaborator Guide]: ../../../COLLABORATOR_GUIDE.md [guide for writing tests in Node.js]: ../writing-tests.md [https://ci.nodejs.org/]: https://ci.nodejs.org/ [IRC in the #node-dev channel]: https://webchat.freenode.net?channels=node-dev&uio=d4 diff --git a/doc/guides/writing-and-running-benchmarks.md b/doc/guides/writing-and-running-benchmarks.md index a6482c607893ca..7fa5400b4ae31b 100644 --- a/doc/guides/writing-and-running-benchmarks.md +++ b/doc/guides/writing-and-running-benchmarks.md @@ -202,12 +202,11 @@ For analysing the benchmark results use the `compare.R` tool. ```console $ cat compare-pr-5134.csv | Rscript benchmark/compare.R - improvement confidence p.value -string_decoder/string-decoder.js n=250000 chunk=1024 inlen=1024 encoding=ascii 12.46 % *** 1.165345e-04 -string_decoder/string-decoder.js n=250000 chunk=1024 inlen=1024 encoding=base64-ascii 24.70 % *** 1.820615e-15 -string_decoder/string-decoder.js n=250000 chunk=1024 inlen=1024 encoding=base64-utf8 23.60 % *** 2.105625e-12 -string_decoder/string-decoder.js n=250000 chunk=1024 inlen=1024 encoding=utf8 14.04 % *** 1.291105e-07 -string_decoder/string-decoder.js n=250000 chunk=1024 inlen=128 encoding=ascii 6.70 % * 2.928003e-02 + confidence improvement accuracy (*) (**) (***) + string_decoder/string-decoder.js n=2500000 chunkLen=16 inLen=128 encoding='ascii' *** -3.76 % ±1.36% ±1.82% ±2.40% + string_decoder/string-decoder.js n=2500000 chunkLen=16 inLen=128 encoding='utf8' ** -0.81 % ±0.53% ±0.71% ±0.93% + string_decoder/string-decoder.js n=2500000 chunkLen=16 inLen=32 encoding='ascii' *** -2.70 % ±0.83% ±1.11% ±1.45% + string_decoder/string-decoder.js n=2500000 chunkLen=16 inLen=32 encoding='base64-ascii' *** -1.57 % ±0.83% ±1.11% ±1.46% ... ``` @@ -241,14 +240,13 @@ results afterwards using tools such as `sed` or `grep`. In the `sed` case be sure to keep the first line since that contains the header information. ```console -$ cat compare-pr-5134.csv | sed '1p;/encoding=ascii/!d' | Rscript benchmark/compare.R --plot compare-plot.png - - improvement confidence p.value -string_decoder/string-decoder.js n=250000 chunk=1024 inlen=1024 encoding=ascii 12.46 % *** 1.165345e-04 -string_decoder/string-decoder.js n=250000 chunk=1024 inlen=128 encoding=ascii 6.70 % * 2.928003e-02 -string_decoder/string-decoder.js n=250000 chunk=1024 inlen=32 encoding=ascii 7.47 % *** 5.780583e-04 -string_decoder/string-decoder.js n=250000 chunk=16 inlen=1024 encoding=ascii 8.94 % *** 1.788579e-04 -string_decoder/string-decoder.js n=250000 chunk=16 inlen=128 encoding=ascii 10.54 % *** 4.016172e-05 +$ cat compare-pr-5134.csv | sed '1p;/encoding='"'"ascii"'"'/!d' | Rscript benchmark/compare.R --plot compare-plot.png + + confidence improvement accuracy (*) (**) (***) + string_decoder/string-decoder.js n=2500000 chunkLen=16 inLen=128 encoding='ascii' *** -3.76 % ±1.36% ±1.82% ±2.40% + string_decoder/string-decoder.js n=2500000 chunkLen=16 inLen=32 encoding='ascii' *** -2.70 % ±0.83% ±1.11% ±1.45% + string_decoder/string-decoder.js n=2500000 chunkLen=16 inLen=4096 encoding='ascii' *** -4.06 % ±0.31% ±0.41% ±0.54% + string_decoder/string-decoder.js n=2500000 chunkLen=256 inLen=1024 encoding='ascii' *** -1.42 % ±0.58% ±0.77% ±1.01% ... 
```
diff --git a/doc/node.1 b/doc/node.1
index c3572acee7bf6b..238572c74c4796 100644
--- a/doc/node.1
+++ b/doc/node.1
@@ -384,4 +384,3 @@ IRC (Node.js core development):
 .Sh AUTHORS
 Written and maintained by 1000+ contributors:
 .Sy https://github.com/nodejs/node/blob/master/AUTHORS
-.
diff --git a/doc/onboarding-extras.md b/doc/onboarding-extras.md
index ffc316d7a670b6..62a7f7bb6b2837 100644
--- a/doc/onboarding-extras.md
+++ b/doc/onboarding-extras.md
@@ -97,4 +97,3 @@ to update from nodejs/node:
 ## Best practices

 * When making PRs, spend time writing a thorough description.
-* Usually only squash at the end of your work.
diff --git a/lib/_stream_readable.js b/lib/_stream_readable.js
index 31a8a11e4a0673..8073e174cc586f 100644
--- a/lib/_stream_readable.js
+++ b/lib/_stream_readable.js
@@ -99,9 +99,6 @@ function ReadableState(options, stream, isDuplex) {
   this.endEmitted = false;
   this.reading = false;

-  // Flipped if an 'error' is emitted.
-  this.errorEmitted = false;
-
   // a flag to be able to tell if the event 'readable'/'data' is emitted
   // immediately, or on a later tick. We set this to true at first, because
   // any actions that shouldn't happen until "later" should generally also
@@ -1072,23 +1069,20 @@ function fromList(n, state) {

 function endReadable(stream) {
   var state = stream._readableState;

-  debug('endReadable', state.endEmitted, state.errorEmitted);
-  if (!state.endEmitted && !state.errorEmitted) {
+  debug('endReadable', state.endEmitted);
+  if (!state.endEmitted) {
     state.ended = true;
     process.nextTick(endReadableNT, state, stream);
   }
 }

 function endReadableNT(state, stream) {
-  debug('endReadableNT', state.endEmitted, state.length, state.errorEmitted);
+  debug('endReadableNT', state.endEmitted, state.length);
   // Check that we didn't get one last unshift.
if (!state.endEmitted && state.length === 0) { + state.endEmitted = true; stream.readable = false; - - if (!state.errorEmitted) { - state.endEmitted = true; - stream.emit('end'); - } + stream.emit('end'); } } diff --git a/lib/_stream_writable.js b/lib/_stream_writable.js index 0891f85526f132..d21daf0541d339 100644 --- a/lib/_stream_writable.js +++ b/lib/_stream_writable.js @@ -424,22 +424,12 @@ function onwriteError(stream, state, sync, er, cb) { // this can emit finish, and it will always happen // after error process.nextTick(finishMaybe, stream, state); - - // needed for duplex, fixes https://github.com/nodejs/node/issues/6083 - if (stream._readableState) { - stream._readableState.errorEmitted = true; - } stream._writableState.errorEmitted = true; stream.emit('error', er); } else { // the caller expect this to happen before if // it is async cb(er); - - // needed for duplex, fixes https://github.com/nodejs/node/issues/6083 - if (stream._readableState) { - stream._readableState.errorEmitted = true; - } stream._writableState.errorEmitted = true; stream.emit('error', er); // this can emit finish, but finish must diff --git a/lib/_tls_wrap.js b/lib/_tls_wrap.js index 2e6b2e8da559db..d85d85752b631b 100644 --- a/lib/_tls_wrap.js +++ b/lib/_tls_wrap.js @@ -62,32 +62,28 @@ const noop = () => {}; function onhandshakestart(now) { debug('onhandshakestart'); - assert(now >= this.lastHandshakeTime); + const { lastHandshakeTime } = this; + assert(now >= lastHandshakeTime); - const owner = this.owner; + this.lastHandshakeTime = now; - if ((now - this.lastHandshakeTime) >= tls.CLIENT_RENEG_WINDOW * 1000) { - this.handshakes = 0; - } + // If this is the first handshake we can skip the rest of the checks. + if (lastHandshakeTime === 0) + return; - const first = (this.lastHandshakeTime === 0); - this.lastHandshakeTime = now; - if (first) return; + if ((now - lastHandshakeTime) >= tls.CLIENT_RENEG_WINDOW * 1000) + this.handshakes = 1; + else + this.handshakes++; - if (++this.handshakes > tls.CLIENT_RENEG_LIMIT) { - // Defer the error event to the next tick. We're being called from OpenSSL's - // state machine and OpenSSL is not re-entrant. We cannot allow the user's - // callback to destroy the connection right now, it would crash and burn. 
- setImmediate(emitSessionAttackError, owner); + const { owner } = this; + if (this.handshakes > tls.CLIENT_RENEG_LIMIT) { + owner._emitTLSError(new ERR_TLS_SESSION_ATTACK()); + return; } - if (owner[kDisableRenegotiation] && this.handshakes > 0) { + if (owner[kDisableRenegotiation]) owner._emitTLSError(new ERR_TLS_RENEGOTIATION_DISABLED()); - } -} - -function emitSessionAttackError(socket) { - socket._emitTLSError(new ERR_TLS_SESSION_ATTACK()); } function onhandshakedone() { @@ -875,7 +871,7 @@ function Server(options, listener) { // Handle option defaults: this.setOptions(options); - var sharedCreds = tls.createSecureContext({ + this._sharedCreds = tls.createSecureContext({ pfx: this.pfx, key: this.key, passphrase: this.passphrase, @@ -891,7 +887,6 @@ function Server(options, listener) { crl: this.crl, sessionIdContext: this.sessionIdContext }); - this._sharedCreds = sharedCreds; this[kHandshakeTimeout] = options.handshakeTimeout || (120 * 1000); this[kSNICallback] = options.SNICallback; @@ -902,11 +897,11 @@ function Server(options, listener) { } if (this.sessionTimeout) { - sharedCreds.context.setSessionTimeout(this.sessionTimeout); + this._sharedCreds.context.setSessionTimeout(this.sessionTimeout); } if (this.ticketKeys) { - sharedCreds.context.setTicketKeys(this.ticketKeys); + this._sharedCreds.context.setTicketKeys(this.ticketKeys); } // constructor call diff --git a/lib/child_process.js b/lib/child_process.js index ee3752a817e781..734f67d7a51f99 100644 --- a/lib/child_process.js +++ b/lib/child_process.js @@ -59,7 +59,7 @@ function stdioStringToArray(option) { } } -exports.fork = function(modulePath /* , args, options */) { +exports.fork = function fork(modulePath /* , args, options */) { // Get options and args arguments. var execArgv; @@ -110,7 +110,7 @@ exports.fork = function(modulePath /* , args, options */) { }; -exports._forkChild = function(fd) { +exports._forkChild = function _forkChild(fd) { // set process.send() var p = new Pipe(PipeConstants.IPC); p.open(fd); @@ -143,7 +143,7 @@ function normalizeExecArgs(command, options, callback) { } -exports.exec = function(command /* , options, callback */) { +exports.exec = function exec(command /* , options, callback */) { var opts = normalizeExecArgs.apply(null, arguments); return exports.execFile(opts.file, opts.options, @@ -172,7 +172,7 @@ Object.defineProperty(exports.exec, util.promisify.custom, { value: customPromiseExecFunction(exports.exec) }); -exports.execFile = function(file /* , args, options, callback */) { +exports.execFile = function execFile(file /* , args, options, callback */) { var args = []; var callback; var options = { @@ -511,7 +511,7 @@ function normalizeSpawnArguments(file, args, options) { } -var spawn = exports.spawn = function(/* file, args, options */) { +var spawn = exports.spawn = function spawn(/* file, args, options */) { var opts = normalizeSpawnArguments.apply(null, arguments); var options = opts.options; var child = new ChildProcess(); diff --git a/lib/console.js b/lib/console.js index a0158ec6643782..0f08e37fa7e22c 100644 --- a/lib/console.js +++ b/lib/console.js @@ -315,15 +315,6 @@ const iterKey = '(iteration index)'; const isArray = (v) => ArrayIsArray(v) || isTypedArray(v) || isBuffer(v); -const inspect = (v) => { - const opt = { depth: 0, maxArrayLength: 3 }; - if (v !== null && typeof v === 'object' && - !isArray(v) && ObjectKeys(v).length > 2) - opt.depth = -1; - return util.inspect(v, opt); -}; - -const getIndexArray = (length) => ArrayFrom({ length }, (_, i) => inspect(i)); // 
https://console.spec.whatwg.org/#table Console.prototype.table = function(tabularData, properties) { @@ -336,6 +327,16 @@ Console.prototype.table = function(tabularData, properties) { const final = (k, v) => this.log(cliTable(k, v)); + const inspect = (v) => { + const opt = { depth: 0, maxArrayLength: 3 }; + if (v !== null && typeof v === 'object' && + !isArray(v) && ObjectKeys(v).length > 2) + opt.depth = -1; + Object.assign(opt, this[kGetInspectOptions](this._stdout)); + return util.inspect(v, opt); + }; + const getIndexArray = (length) => ArrayFrom({ length }, (_, i) => inspect(i)); + const mapIter = isMapIterator(tabularData); if (mapIter) tabularData = previewMapIterator(tabularData); diff --git a/lib/events.js b/lib/events.js index 46d1223e69a4bf..ff1648d6aa13e7 100644 --- a/lib/events.js +++ b/lib/events.js @@ -235,22 +235,20 @@ function _addListener(target, type, listener, prepend) { } // Check for listener leak - if (!existing.warned) { - m = $getMaxListeners(target); - if (m && m > 0 && existing.length > m) { - existing.warned = true; - // No error code for this since it is a Warning - // eslint-disable-next-line no-restricted-syntax - const w = new Error('Possible EventEmitter memory leak detected. ' + - `${existing.length} ${String(type)} listeners ` + - 'added. Use emitter.setMaxListeners() to ' + - 'increase limit'); - w.name = 'MaxListenersExceededWarning'; - w.emitter = target; - w.type = type; - w.count = existing.length; - process.emitWarning(w); - } + m = $getMaxListeners(target); + if (m > 0 && existing.length > m && !existing.warned) { + existing.warned = true; + // No error code for this since it is a Warning + // eslint-disable-next-line no-restricted-syntax + const w = new Error('Possible EventEmitter memory leak detected. ' + + `${existing.length} ${String(type)} listeners ` + + 'added. 
Use emitter.setMaxListeners() to ' + + 'increase limit'); + w.name = 'MaxListenersExceededWarning'; + w.emitter = target; + w.type = type; + w.count = existing.length; + process.emitWarning(w); } } diff --git a/lib/fs.js b/lib/fs.js index 2d41f17e8d1de5..e9d878803ddffd 100644 --- a/lib/fs.js +++ b/lib/fs.js @@ -45,7 +45,8 @@ const { Readable, Writable } = require('stream'); const EventEmitter = require('events'); const { FSReqWrap, statValues, kFsStatsFieldsLength } = binding; const { FSEvent } = process.binding('fs_event_wrap'); -const internalFS = require('internal/fs'); +const promises = require('internal/fs/promises'); +const internalFS = require('internal/fs/utils'); const { getPathFromURL } = require('internal/url'); const internalUtil = require('internal/util'); const { @@ -72,6 +73,21 @@ const { CHAR_BACKWARD_SLASH, } = require('internal/constants'); +let warn = true; + +Object.defineProperty(fs, 'promises', { + configurable: true, + enumerable: true, + get() { + if (warn) { + warn = false; + process.emitWarning('The fs.promises API is experimental', + 'ExperimentalWarning'); + } + return promises; + } +}); + Object.defineProperty(exports, 'constants', { configurable: false, enumerable: true, @@ -245,7 +261,7 @@ fs.readFile = function(path, options, callback) { req.oncomplete = readFileAfterOpen; if (context.isUserFd) { - process.nextTick(function() { + process.nextTick(function tick() { req.oncomplete(null, path); }); return; @@ -302,7 +318,7 @@ ReadFileContext.prototype.close = function(err) { this.err = err; if (this.isUserFd) { - process.nextTick(function() { + process.nextTick(function tick() { req.oncomplete(null); }); return; @@ -552,7 +568,7 @@ fs.read = function(fd, buffer, offset, length, position, callback) { length |= 0; if (length === 0) { - return process.nextTick(function() { + return process.nextTick(function tick() { callback && callback(null, 0, buffer); }); } @@ -1217,7 +1233,7 @@ function writeAll(fd, isUserFd, buffer, offset, length, position, callback) { if (isUserFd) { callback(writeErr); } else { - fs.close(fd, function() { + fs.close(fd, function close() { callback(writeErr); }); } diff --git a/lib/https.js b/lib/https.js index 4cca2fb9eeea88..2237a909e999c3 100644 --- a/lib/https.js +++ b/lib/https.js @@ -74,6 +74,7 @@ function Server(opts, requestListener) { this.timeout = 2 * 60 * 1000; this.keepAliveTimeout = 5000; + this.maxHeadersCount = null; } inherits(Server, tls.Server); diff --git a/lib/internal/errors.js b/lib/internal/errors.js index cc4c955cba1588..fc306e256c1f1a 100644 --- a/lib/internal/errors.js +++ b/lib/internal/errors.js @@ -870,7 +870,6 @@ E('ERR_INVALID_ARG_VALUE', (name, value, reason = 'is invalid') => { }, TypeError, RangeError); E('ERR_INVALID_ARRAY_LENGTH', (name, len, actual) => { - internalAssert(typeof actual === 'number', 'actual must be of type number'); return `The array "${name}" (length ${actual}) must be of length ${len}.`; }, TypeError); E('ERR_INVALID_ASYNC_ID', 'Invalid %s value: %s', RangeError); diff --git a/lib/internal/fixed_queue.js b/lib/internal/fixed_queue.js new file mode 100644 index 00000000000000..7571a8f67e36d2 --- /dev/null +++ b/lib/internal/fixed_queue.js @@ -0,0 +1,113 @@ +'use strict'; + +// Currently optimal queue size, tested on V8 6.0 - 6.6. Must be power of two. +const kSize = 2048; +const kMask = kSize - 1; + +// The FixedQueue is implemented as a singly-linked list of fixed-size +// circular buffers. 
It looks something like this: +// +// head tail +// | | +// v v +// +-----------+ <-----\ +-----------+ <------\ +-----------+ +// | [null] | \----- | next | \------- | next | +// +-----------+ +-----------+ +-----------+ +// | item | <-- bottom | item | <-- bottom | [empty] | +// | item | | item | | [empty] | +// | item | | item | | [empty] | +// | item | | item | | [empty] | +// | item | | item | bottom --> | item | +// | item | | item | | item | +// | ... | | ... | | ... | +// | item | | item | | item | +// | item | | item | | item | +// | [empty] | <-- top | item | | item | +// | [empty] | | item | | item | +// | [empty] | | [empty] | <-- top top --> | [empty] | +// +-----------+ +-----------+ +-----------+ +// +// Or, if there is only one circular buffer, it looks something +// like either of these: +// +// head tail head tail +// | | | | +// v v v v +// +-----------+ +-----------+ +// | [null] | | [null] | +// +-----------+ +-----------+ +// | [empty] | | item | +// | [empty] | | item | +// | item | <-- bottom top --> | [empty] | +// | item | | [empty] | +// | [empty] | <-- top bottom --> | item | +// | [empty] | | item | +// +-----------+ +-----------+ +// +// Adding a value means moving `top` forward by one, removing means +// moving `bottom` forward by one. After reaching the end, the queue +// wraps around. +// +// When `top === bottom` the current queue is empty and when +// `top + 1 === bottom` it's full. This wastes a single space of storage +// but allows much quicker checks. + +const FixedCircularBuffer = class FixedCircularBuffer { + constructor() { + this.bottom = 0; + this.top = 0; + this.list = new Array(kSize); + this.next = null; + } + + isEmpty() { + return this.top === this.bottom; + } + + isFull() { + return ((this.top + 1) & kMask) === this.bottom; + } + + push(data) { + this.list[this.top] = data; + this.top = (this.top + 1) & kMask; + } + + shift() { + const nextItem = this.list[this.bottom]; + if (nextItem === undefined) + return null; + this.list[this.bottom] = undefined; + this.bottom = (this.bottom + 1) & kMask; + return nextItem; + } +}; + +module.exports = class FixedQueue { + constructor() { + this.head = this.tail = new FixedCircularBuffer(); + } + + isEmpty() { + return this.head.isEmpty(); + } + + push(data) { + if (this.head.isFull()) { + // Head is full: Creates a new queue, sets the old queue's `.next` to it, + // and sets it as the new main queue. + this.head = this.head.next = new FixedCircularBuffer(); + } + this.head.push(data); + } + + shift() { + const { tail } = this; + const next = tail.shift(); + if (tail.isEmpty() && tail.next !== null) { + // If there is another queue, it forms the new tail. 
+ this.tail = tail.next; + } + return next; + } +}; diff --git a/lib/fs/promises.js b/lib/internal/fs/promises.js similarity index 97% rename from lib/fs/promises.js rename to lib/internal/fs/promises.js index ba6c2b7aa64855..eb913adcbf975b 100644 --- a/lib/fs/promises.js +++ b/lib/internal/fs/promises.js @@ -1,8 +1,5 @@ 'use strict'; -process.emitWarning('The fs/promises API is experimental', - 'ExperimentalWarning'); - const { F_OK, O_SYMLINK, @@ -37,7 +34,7 @@ const { validateOffsetLengthWrite, validatePath, validateUint32 -} = require('internal/fs'); +} = require('internal/fs/utils'); const pathModule = require('path'); const kHandle = Symbol('handle'); @@ -245,15 +242,6 @@ async function write(handle, buffer, offset, length, position) { if (typeof buffer !== 'string') buffer += ''; - if (typeof position !== 'function') { - if (typeof offset === 'function') { - position = offset; - offset = null; - } else { - position = length; - } - length = 'utf8'; - } const bytesWritten = (await binding.writeString(handle.fd, buffer, offset, length, kUsePromises)) || 0; return { bytesWritten, buffer }; @@ -400,7 +388,7 @@ async function lchown(path, uid, gid) { if (O_SYMLINK !== undefined) { const fd = await open(path, O_WRONLY | O_SYMLINK); - return fchmod(fd, uid, gid).finally(fd.close.bind(fd)); + return fchown(fd, uid, gid).finally(fd.close.bind(fd)); } throw new ERR_METHOD_NOT_IMPLEMENTED(); } diff --git a/lib/internal/fs.js b/lib/internal/fs/utils.js similarity index 100% rename from lib/internal/fs.js rename to lib/internal/fs/utils.js diff --git a/lib/internal/modules/cjs/loader.js b/lib/internal/modules/cjs/loader.js index 652378ad5782fb..85ab3ab1443ded 100644 --- a/lib/internal/modules/cjs/loader.js +++ b/lib/internal/modules/cjs/loader.js @@ -28,7 +28,7 @@ const { getURLFromFilePath } = require('internal/url'); const vm = require('vm'); const assert = require('assert').ok; const fs = require('fs'); -const internalFS = require('internal/fs'); +const internalFS = require('internal/fs/utils'); const path = require('path'); const { internalModuleReadJSON, diff --git a/lib/internal/modules/esm/default_resolve.js b/lib/internal/modules/esm/default_resolve.js index 60516535e9ad03..48d3ef73fd86cd 100644 --- a/lib/internal/modules/esm/default_resolve.js +++ b/lib/internal/modules/esm/default_resolve.js @@ -2,7 +2,7 @@ const { URL } = require('url'); const CJSmodule = require('internal/modules/cjs/loader'); -const internalFS = require('internal/fs'); +const internalFS = require('internal/fs/utils'); const { NativeModule, internalBinding } = require('internal/bootstrap/loaders'); const { extname } = require('path'); const { realpathSync } = require('fs'); diff --git a/lib/internal/process/next_tick.js b/lib/internal/process/next_tick.js index 7a5d2f88a6d17e..dbe0ce8cdbdacf 100644 --- a/lib/internal/process/next_tick.js +++ b/lib/internal/process/next_tick.js @@ -16,6 +16,7 @@ function setupNextTick() { } = require('internal/async_hooks'); const promises = require('internal/process/promises'); const { ERR_INVALID_CALLBACK } = require('internal/errors').codes; + const FixedQueue = require('internal/fixed_queue'); const { emitPromiseRejectionWarnings } = promises; // tickInfo is used so that the C++ code in src/node.cc can @@ -31,119 +32,7 @@ function setupNextTick() { const kHasScheduled = 0; const kHasPromiseRejections = 1; - // Queue size for each tick array. Must be a power of two. 
- const kQueueSize = 2048; - const kQueueMask = kQueueSize - 1; - - // The next tick queue is implemented as a singly-linked list of fixed-size - // circular buffers. It looks something like this: - // - // head tail - // | | - // v v - // +-----------+ <-----\ +-----------+ <------\ +-----------+ - // | [null] | \----- | next | \------- | next | - // +-----------+ +-----------+ +-----------+ - // | tick | <-- bottom | tick | <-- bottom | [empty] | - // | tick | | tick | | [empty] | - // | tick | | tick | | [empty] | - // | tick | | tick | | [empty] | - // | tick | | tick | bottom --> | tick | - // | tick | | tick | | tick | - // | ... | | ... | | ... | - // | tick | | tick | | tick | - // | tick | | tick | | tick | - // | [empty] | <-- top | tick | | tick | - // | [empty] | | tick | | tick | - // | [empty] | | tick | | tick | - // +-----------+ +-----------+ <-- top top --> +-----------+ - // - // Or, if there is only one fixed-size queue, it looks something - // like either of these: - // - // head tail head tail - // | | | | - // v v v v - // +-----------+ +-----------+ - // | [null] | | [null] | - // +-----------+ +-----------+ - // | [empty] | | tick | - // | [empty] | | tick | - // | tick | <-- bottom top --> | [empty] | - // | tick | | [empty] | - // | [empty] | <-- top bottom --> | tick | - // | [empty] | | tick | - // +-----------+ +-----------+ - // - // Adding a value means moving `top` forward by one, removing means - // moving `bottom` forward by one. - // - // We let `bottom` and `top` wrap around, so when `top` is conceptually - // pointing to the end of the list, that means that the actual value is `0`. - // - // In particular, when `top === bottom`, this can mean *either* that the - // current queue is empty or that it is full. We can differentiate by - // checking whether an entry in the queue is empty (a.k.a. `=== undefined`). - - class FixedQueue { - constructor() { - this.bottom = 0; - this.top = 0; - this.list = new Array(kQueueSize); - this.next = null; - } - - push(data) { - this.list[this.top] = data; - this.top = (this.top + 1) & kQueueMask; - } - - shift() { - const nextItem = this.list[this.bottom]; - if (nextItem === undefined) - return null; - this.list[this.bottom] = undefined; - this.bottom = (this.bottom + 1) & kQueueMask; - return nextItem; - } - } - - var head = new FixedQueue(); - var tail = head; - - function push(data) { - if (head.bottom === head.top) { - // Either empty or full: - if (head.list[head.top] !== undefined) { - // It's full: Creates a new queue, sets the old queue's `.next` to it, - // and sets it as the new main queue. - head = head.next = new FixedQueue(); - } else { - // If the head is empty, that means that it was the only fixed-sized - // queue in existence. - DCHECK_EQ(head.next, null); - // This is the first tick object in existence, so we need to inform - // the C++ side that we do want to run `_tickCallback()`. - tickInfo[kHasScheduled] = 1; - } - } - head.push(data); - } - - function shift() { - const next = tail.shift(); - if (tail.top === tail.bottom) { // -> .shift() emptied the current queue. - if (tail.next !== null) { - // If there is another queue, it forms the new tail. - tail = tail.next; - } else { - // We've just run out of items. Let the native side know that it - // doesn't need to bother calling into JS to run the queue. - tickInfo[kHasScheduled] = 0; - } - } - return next; - } + const queue = new FixedQueue(); process.nextTick = nextTick; // Needs to be accessible from beyond this scope. 
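The refactor above swaps the inline queue for the `FixedQueue` class added in `lib/internal/fixed_queue.js` earlier in this patch. A small sketch of the API that `next_tick.js` now relies on (illustration only: internal modules are not `require()`-able from user code, so assume `FixedQueue` refers to the class shown above):

```js
const queue = new FixedQueue();

console.log(queue.isEmpty()); // true

queue.push('a');
queue.push('b');
console.log(queue.isEmpty()); // false

console.log(queue.shift());   // 'a'
console.log(queue.shift());   // 'b'
console.log(queue.shift());   // null, shifting an empty queue yields null
console.log(queue.isEmpty()); // true
```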
@@ -152,7 +41,7 @@ function setupNextTick() { function _tickCallback() { let tock; do { - while (tock = shift()) { + while (tock = queue.shift()) { const asyncId = tock[async_id_symbol]; emitBefore(asyncId, tock[trigger_async_id_symbol]); // emitDestroy() places the async_id_symbol into an asynchronous queue @@ -175,8 +64,9 @@ function setupNextTick() { emitAfter(asyncId); } + tickInfo[kHasScheduled] = 0; runMicrotasks(); - } while (head.top !== head.bottom || emitPromiseRejectionWarnings()); + } while (!queue.isEmpty() || emitPromiseRejectionWarnings()); tickInfo[kHasPromiseRejections] = 0; } @@ -222,6 +112,8 @@ function setupNextTick() { args[i - 1] = arguments[i]; } - push(new TickObject(callback, args, getDefaultTriggerAsyncId())); + if (queue.isEmpty()) + tickInfo[kHasScheduled] = 1; + queue.push(new TickObject(callback, args, getDefaultTriggerAsyncId())); } } diff --git a/lib/internal/process/stdio.js b/lib/internal/process/stdio.js index 29aee7d09bdab9..75ede6a8e7e157 100644 --- a/lib/internal/process/stdio.js +++ b/lib/internal/process/stdio.js @@ -158,7 +158,7 @@ function createWritableStdioStream(fd) { break; case 'FILE': - var fs = require('internal/fs'); + var fs = require('internal/fs/utils'); stream = new fs.SyncWriteStream(fd, { autoClose: false }); stream._type = 'fs'; break; diff --git a/lib/internal/streams/destroy.js b/lib/internal/streams/destroy.js index 2ab614e1d597da..3a0383cc3cea70 100644 --- a/lib/internal/streams/destroy.js +++ b/lib/internal/streams/destroy.js @@ -8,14 +8,10 @@ function destroy(err, cb) { this._writableState.destroyed; if (readableDestroyed || writableDestroyed) { - const readableErrored = this._readableState && - this._readableState.errorEmitted; - const writableErrored = this._writableState && - this._writableState.errorEmitted; - if (cb) { cb(err); - } else if (err && !readableErrored && !writableErrored) { + } else if (err && + (!this._writableState || !this._writableState.errorEmitted)) { process.nextTick(emitErrorNT, this, err); } return this; @@ -36,11 +32,6 @@ function destroy(err, cb) { this._destroy(err || null, (err) => { if (!cb && err) { process.nextTick(emitErrorAndCloseNT, this, err); - - if (this._readableState) { - this._readableState.errorEmitted = true; - } - if (this._writableState) { this._writableState.errorEmitted = true; } @@ -74,7 +65,6 @@ function undestroy() { this._readableState.reading = false; this._readableState.ended = false; this._readableState.endEmitted = false; - this._readableState.errorEmitted = false; } if (this._writableState) { diff --git a/lib/internal/url.js b/lib/internal/url.js index cff94e6b7d2b5b..d9daef1524787d 100644 --- a/lib/internal/url.js +++ b/lib/internal/url.js @@ -400,7 +400,9 @@ Object.defineProperties(URL.prototype, { ret += '@'; } ret += options.unicode ? - domainToUnicode(this.host) : this.host; + domainToUnicode(this.hostname) : this.hostname; + if (ctx.port !== null) + ret += `:${ctx.port}`; } else if (ctx.scheme === 'file:') { ret += '//'; } diff --git a/lib/internal/util.js b/lib/internal/util.js index 071563a737815b..07515e2e090daa 100644 --- a/lib/internal/util.js +++ b/lib/internal/util.js @@ -322,10 +322,11 @@ function join(output, separator) { return str; } -// About 1.5x faster than the two-arg version of Array#splice(). 
+// As of V8 6.6, depending on the size of the array, this is anywhere +// between 1.5-10x faster than the two-arg version of Array#splice() function spliceOne(list, index) { - for (var i = index, k = i + 1, n = list.length; k < n; i += 1, k += 1) - list[i] = list[k]; + for (; index + 1 < list.length; index++) + list[index] = list[index + 1]; list.pop(); } diff --git a/lib/repl.js b/lib/repl.js index 2bef57fa100170..600816a6058fbc 100644 --- a/lib/repl.js +++ b/lib/repl.js @@ -1429,9 +1429,9 @@ function defineDefaultCommands(repl) { action: function(file) { try { fs.writeFileSync(file, this.lines.join('\n') + '\n'); - this.outputStream.write('Session saved to:' + file + '\n'); + this.outputStream.write('Session saved to: ' + file + '\n'); } catch (e) { - this.outputStream.write('Failed to save:' + file + '\n'); + this.outputStream.write('Failed to save: ' + file + '\n'); } this.displayPrompt(); } @@ -1453,11 +1453,11 @@ function defineDefaultCommands(repl) { _turnOffEditorMode(this); this.write('\n'); } else { - this.outputStream.write('Failed to load:' + file + + this.outputStream.write('Failed to load: ' + file + ' is not a valid file\n'); } } catch (e) { - this.outputStream.write('Failed to load:' + file + '\n'); + this.outputStream.write('Failed to load: ' + file + '\n'); } this.displayPrompt(); } diff --git a/lib/timers.js b/lib/timers.js index 15700f5a1212ab..ff85a9f4b1d1e0 100644 --- a/lib/timers.js +++ b/lib/timers.js @@ -453,7 +453,7 @@ function rearm(timer, start = TimerWrap.now()) { } -const clearTimeout = exports.clearTimeout = function(timer) { +const clearTimeout = exports.clearTimeout = function clearTimeout(timer) { if (timer && timer._onTimeout) { timer._onTimeout = null; if (timer instanceof Timeout) { @@ -465,7 +465,7 @@ const clearTimeout = exports.clearTimeout = function(timer) { }; -exports.setInterval = function(callback, repeat, arg1, arg2, arg3) { +exports.setInterval = function setInterval(callback, repeat, arg1, arg2, arg3) { if (typeof callback !== 'function') { throw new ERR_INVALID_CALLBACK(); } @@ -774,7 +774,7 @@ setImmediate[internalUtil.promisify.custom] = function(value) { exports.setImmediate = setImmediate; -exports.clearImmediate = function(immediate) { +exports.clearImmediate = function clearImmediate(immediate) { if (!immediate || immediate._destroyed) return; diff --git a/lib/util.js b/lib/util.js index 93bcb343a9eefc..45d98de194c836 100644 --- a/lib/util.js +++ b/lib/util.js @@ -277,12 +277,12 @@ function debuglog(set) { if (!debugs[set]) { if (debugEnvRegex.test(set)) { const pid = process.pid; - debugs[set] = function() { + debugs[set] = function debug() { const msg = exports.format.apply(exports, arguments); console.error('%s %d: %s', set, pid, msg); }; } else { - debugs[set] = function() {}; + debugs[set] = function debug() {}; } } return debugs[set]; diff --git a/node.gyp b/node.gyp index ac3c28037e8613..eb9c371c982c0f 100644 --- a/node.gyp +++ b/node.gyp @@ -39,7 +39,6 @@ 'lib/domain.js', 'lib/events.js', 'lib/fs.js', - 'lib/fs/promises.js', 'lib/http.js', 'lib/http2.js', 'lib/_http_agent.js', @@ -101,8 +100,10 @@ 'lib/internal/constants.js', 'lib/internal/encoding.js', 'lib/internal/errors.js', + 'lib/internal/fixed_queue.js', 'lib/internal/freelist.js', - 'lib/internal/fs.js', + 'lib/internal/fs/promises.js', + 'lib/internal/fs/utils.js', 'lib/internal/http.js', 'lib/internal/inspector_async_hook.js', 'lib/internal/linkedlist.js', diff --git a/src/async_wrap.cc b/src/async_wrap.cc index 06dcbb4fb1d29d..f7a6d4e68dd483 100644 --- 
a/src/async_wrap.cc +++ b/src/async_wrap.cc @@ -123,8 +123,8 @@ RetainedObjectInfo* WrapperInfo(uint16_t class_id, Local wrapper) { Local object = wrapper.As(); CHECK_GT(object->InternalFieldCount(), 0); - AsyncWrap* wrap = Unwrap(object); - if (wrap == nullptr) return nullptr; // ClearWrap() already called. + AsyncWrap* wrap; + ASSIGN_OR_RETURN_UNWRAP(&wrap, object, nullptr); return new RetainedAsyncInfo(class_id, wrap); } @@ -231,7 +231,7 @@ class PromiseWrap : public AsyncWrap { public: PromiseWrap(Environment* env, Local object, bool silent) : AsyncWrap(env, object, PROVIDER_PROMISE, -1, silent) { - MakeWeak(this); + MakeWeak(); } size_t self_size() const override { return sizeof(*this); } diff --git a/src/base_object-inl.h b/src/base_object-inl.h index 5ff211f473b86b..11ba1c88da0486 100644 --- a/src/base_object-inl.h +++ b/src/base_object-inl.h @@ -31,54 +31,90 @@ namespace node { -inline BaseObject::BaseObject(Environment* env, v8::Local handle) +BaseObject::BaseObject(Environment* env, v8::Local handle) : persistent_handle_(env->isolate(), handle), env_(env) { CHECK_EQ(false, handle.IsEmpty()); - // The zero field holds a pointer to the handle. Immediately set it to - // nullptr in case it's accessed by the user before construction is complete. - if (handle->InternalFieldCount() > 0) - handle->SetAlignedPointerInInternalField(0, nullptr); + CHECK_GT(handle->InternalFieldCount(), 0); + handle->SetAlignedPointerInInternalField(0, static_cast(this)); } -inline Persistent& BaseObject::persistent() { +BaseObject::~BaseObject() { + if (persistent_handle_.IsEmpty()) { + // This most likely happened because the weak callback below cleared it. + return; + } + + { + v8::HandleScope handle_scope(env_->isolate()); + object()->SetAlignedPointerInInternalField(0, nullptr); + } +} + + +Persistent& BaseObject::persistent() { return persistent_handle_; } -inline v8::Local BaseObject::object() { +v8::Local BaseObject::object() { return PersistentToLocal(env_->isolate(), persistent_handle_); } -inline Environment* BaseObject::env() const { +Environment* BaseObject::env() const { return env_; } -template -inline void BaseObject::WeakCallback( - const v8::WeakCallbackInfo& data) { - delete data.GetParameter(); +BaseObject* BaseObject::FromJSObject(v8::Local obj) { + CHECK_GT(obj->InternalFieldCount(), 0); + return static_cast(obj->GetAlignedPointerFromInternalField(0)); } -template -inline void BaseObject::MakeWeak(Type* ptr) { - v8::HandleScope scope(env_->isolate()); - v8::Local handle = object(); - CHECK_GT(handle->InternalFieldCount(), 0); - Wrap(handle, ptr); - persistent_handle_.SetWeak(ptr, WeakCallback, - v8::WeakCallbackType::kParameter); +template +T* BaseObject::FromJSObject(v8::Local object) { + return static_cast(FromJSObject(object)); } -inline void BaseObject::ClearWeak() { +void BaseObject::MakeWeak() { + persistent_handle_.SetWeak( + this, + [](const v8::WeakCallbackInfo& data) { + BaseObject* obj = data.GetParameter(); + // Clear the persistent handle so that ~BaseObject() doesn't attempt + // to mess with internal fields, since the JS object may have + // transitioned into an invalid state. 
+ // Refs: https://github.com/nodejs/node/issues/18897 + obj->persistent_handle_.Reset(); + delete obj; + }, v8::WeakCallbackType::kParameter); +} + + +void BaseObject::ClearWeak() { persistent_handle_.ClearWeak(); } + +v8::Local +BaseObject::MakeLazilyInitializedJSTemplate(Environment* env) { + auto constructor = [](const v8::FunctionCallbackInfo& args) { +#ifdef DEBUG + CHECK(args.IsConstructCall()); + CHECK_GT(args.This()->InternalFieldCount(), 0); +#endif + args.This()->SetAlignedPointerInInternalField(0, nullptr); + }; + + v8::Local t = env->NewFunctionTemplate(constructor); + t->InstanceTemplate()->SetInternalFieldCount(1); + return t; +} + } // namespace node #endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS diff --git a/src/base_object.h b/src/base_object.h index 478499bbfeb5b2..7d8281238b1c1d 100644 --- a/src/base_object.h +++ b/src/base_object.h @@ -26,6 +26,7 @@ #include "node_persistent.h" #include "v8.h" +#include // std::remove_reference namespace node { @@ -33,8 +34,10 @@ class Environment; class BaseObject { public: + // Associates this object with `handle`. It uses the 0th internal field for + // that, and in particular aborts if there is no such field. inline BaseObject(Environment* env, v8::Local handle); - virtual ~BaseObject() = default; + virtual inline ~BaseObject(); // Returns the wrapped object. Returns an empty handle when // persistent.IsEmpty() is true. @@ -44,23 +47,30 @@ class BaseObject { inline Environment* env() const; - // The handle_ must have an internal field count > 0, and the first - // index is reserved for a pointer to this class. This is an - // implicit requirement, but Node does not have a case where it's - // required that MakeWeak() be called and the internal field not - // be set. - template - inline void MakeWeak(Type* ptr); + // Get a BaseObject* pointer, or subclass pointer, for the JS object that + // was also passed to the `BaseObject()` constructor initially. + // This may return `nullptr` if the C++ object has not been constructed yet, + // e.g. when the JS object used `MakeLazilyInitializedJSTemplate`. + static inline BaseObject* FromJSObject(v8::Local object); + template + static inline T* FromJSObject(v8::Local object); + // Make the `Persistent` a weak reference and, `delete` this object once + // the JS object has been garbage collected. + inline void MakeWeak(); + + // Undo `MakeWeak()`, i.e. turn this into a strong reference. inline void ClearWeak(); + // Utility to create a FunctionTemplate with one internal field (used for + // the `BaseObject*` pointer) and a constructor that initializes that field + // to `nullptr`. + static inline v8::Local MakeLazilyInitializedJSTemplate( + Environment* env); + private: BaseObject(); - template - static inline void WeakCallback( - const v8::WeakCallbackInfo& data); - // persistent_handle_ needs to be at a fixed offset from the start of the // class because it is used by src/node_postmortem_metadata.cc to calculate // offsets and generate debug symbols for BaseObject, which assumes that the @@ -71,6 +81,22 @@ class BaseObject { Environment* env_; }; + +// Global alias for FromJSObject() to avoid churn. +template +inline T* Unwrap(v8::Local obj) { + return BaseObject::FromJSObject(obj); +} + + +#define ASSIGN_OR_RETURN_UNWRAP(ptr, obj, ...) 
\ + do { \ + *ptr = static_cast::type>( \ + BaseObject::FromJSObject(obj)); \ + if (*ptr == nullptr) \ + return __VA_ARGS__; \ + } while (0) + } // namespace node #endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS diff --git a/src/cares_wrap.cc b/src/cares_wrap.cc index 0cc7ed1464fdb7..4df47d75d43ba8 100644 --- a/src/cares_wrap.cc +++ b/src/cares_wrap.cc @@ -187,7 +187,7 @@ ChannelWrap::ChannelWrap(Environment* env, is_servers_default_(true), library_inited_(false), active_query_count_(0) { - MakeWeak(this); + MakeWeak(); Setup(); } @@ -205,7 +205,6 @@ class GetAddrInfoReqWrap : public ReqWrap { GetAddrInfoReqWrap(Environment* env, Local req_wrap_obj, bool verbatim); - ~GetAddrInfoReqWrap(); size_t self_size() const override { return sizeof(*this); } bool verbatim() const { return verbatim_; } @@ -219,18 +218,12 @@ GetAddrInfoReqWrap::GetAddrInfoReqWrap(Environment* env, bool verbatim) : ReqWrap(env, req_wrap_obj, AsyncWrap::PROVIDER_GETADDRINFOREQWRAP) , verbatim_(verbatim) { - Wrap(req_wrap_obj, this); -} - -GetAddrInfoReqWrap::~GetAddrInfoReqWrap() { - ClearWrap(object()); } class GetNameInfoReqWrap : public ReqWrap { public: GetNameInfoReqWrap(Environment* env, Local req_wrap_obj); - ~GetNameInfoReqWrap(); size_t self_size() const override { return sizeof(*this); } }; @@ -238,11 +231,6 @@ class GetNameInfoReqWrap : public ReqWrap { GetNameInfoReqWrap::GetNameInfoReqWrap(Environment* env, Local req_wrap_obj) : ReqWrap(env, req_wrap_obj, AsyncWrap::PROVIDER_GETNAMEINFOREQWRAP) { - Wrap(req_wrap_obj, this); -} - -GetNameInfoReqWrap::~GetNameInfoReqWrap() { - ClearWrap(object()); } @@ -587,8 +575,6 @@ class QueryWrap : public AsyncWrap { QueryWrap(ChannelWrap* channel, Local req_wrap_obj) : AsyncWrap(channel->env(), req_wrap_obj, AsyncWrap::PROVIDER_QUERYWRAP), channel_(channel) { - Wrap(req_wrap_obj, this); - // Make sure the channel object stays alive during the query lifetime. req_wrap_obj->Set(env()->context(), env()->channel_string(), @@ -597,7 +583,6 @@ class QueryWrap : public AsyncWrap { ~QueryWrap() override { CHECK_EQ(false, persistent().IsEmpty()); - ClearWrap(object()); } // Subclasses should implement the appropriate Send method. 
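The C++ hunks above (`base_object-inl.h`, `base_object.h`, `cares_wrap.cc`, `connect_wrap.*`) all converge on one scheme: the `BaseObject` constructor stores `this` in internal field 0 of the JS object, `FromJSObject()`/`Unwrap()` read it back and may legitimately see `nullptr` for a lazily initialized object, and `ASSIGN_OR_RETURN_UNWRAP` turns that `nullptr` into an early return from the binding. Here is a standalone analogue of that "pointer slot plus early-return macro" pattern, with no V8 involved and every name invented for the example:

```cpp
#include <cstdio>

// Stand-in for a JS object with one "internal field" (slot 0).
struct HandleObject {
  void* internal_field = nullptr;  // nullptr until the native side attaches.
};

class NativeObject {
 public:
  NativeObject(HandleObject* handle, int value) : value_(value) {
    handle->internal_field = this;  // Like BaseObject() storing `this`.
  }
  static NativeObject* FromHandle(HandleObject* handle) {
    return static_cast<NativeObject*>(handle->internal_field);
  }
  int value() const { return value_; }

 private:
  int value_;
};

// Analogue of ASSIGN_OR_RETURN_UNWRAP: fetch the native pointer and return
// early (with an optional value) if it has not been set yet.
#define ASSIGN_OR_RETURN_UNWRAP_SKETCH(ptr, handle, ...)                     \
  do {                                                                       \
    *(ptr) = NativeObject::FromHandle(handle);                               \
    if (*(ptr) == nullptr) return __VA_ARGS__;                               \
  } while (0)

int PrintValue(HandleObject* handle) {
  NativeObject* obj;
  ASSIGN_OR_RETURN_UNWRAP_SKETCH(&obj, handle, -1);  // -1 if not attached.
  std::printf("value = %d\n", obj->value());
  return 0;
}

int main() {
  HandleObject lazily_initialized;                        // No native object yet.
  std::printf("%d\n", PrintValue(&lazily_initialized));   // Prints -1.

  HandleObject handle;
  NativeObject native(&handle, 42);
  return PrintValue(&handle);                             // Prints "value = 42".
}
```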
@@ -2143,14 +2128,8 @@ void Initialize(Local target, target->Set(FIXED_ONE_BYTE_STRING(env->isolate(), "AI_V4MAPPED"), Integer::New(env->isolate(), AI_V4MAPPED)); - auto is_construct_call_callback = - [](const FunctionCallbackInfo& args) { - CHECK(args.IsConstructCall()); - ClearWrap(args.This()); - }; Local aiw = - FunctionTemplate::New(env->isolate(), is_construct_call_callback); - aiw->InstanceTemplate()->SetInternalFieldCount(1); + BaseObject::MakeLazilyInitializedJSTemplate(env); AsyncWrap::AddWrapMethods(env, aiw); Local addrInfoWrapString = FIXED_ONE_BYTE_STRING(env->isolate(), "GetAddrInfoReqWrap"); @@ -2158,8 +2137,7 @@ void Initialize(Local target, target->Set(addrInfoWrapString, aiw->GetFunction()); Local niw = - FunctionTemplate::New(env->isolate(), is_construct_call_callback); - niw->InstanceTemplate()->SetInternalFieldCount(1); + BaseObject::MakeLazilyInitializedJSTemplate(env); AsyncWrap::AddWrapMethods(env, niw); Local nameInfoWrapString = FIXED_ONE_BYTE_STRING(env->isolate(), "GetNameInfoReqWrap"); @@ -2167,8 +2145,7 @@ void Initialize(Local target, target->Set(nameInfoWrapString, niw->GetFunction()); Local qrw = - FunctionTemplate::New(env->isolate(), is_construct_call_callback); - qrw->InstanceTemplate()->SetInternalFieldCount(1); + BaseObject::MakeLazilyInitializedJSTemplate(env); AsyncWrap::AddWrapMethods(env, qrw); Local queryWrapString = FIXED_ONE_BYTE_STRING(env->isolate(), "QueryReqWrap"); diff --git a/src/connect_wrap.cc b/src/connect_wrap.cc index f9ea987e05f5a3..dacdf72da7c494 100644 --- a/src/connect_wrap.cc +++ b/src/connect_wrap.cc @@ -13,12 +13,6 @@ using v8::Object; ConnectWrap::ConnectWrap(Environment* env, Local req_wrap_obj, AsyncWrap::ProviderType provider) : ReqWrap(env, req_wrap_obj, provider) { - Wrap(req_wrap_obj, this); -} - - -ConnectWrap::~ConnectWrap() { - ClearWrap(object()); } } // namespace node diff --git a/src/connect_wrap.h b/src/connect_wrap.h index 6227542bcbee50..80eae7f9bb8290 100644 --- a/src/connect_wrap.h +++ b/src/connect_wrap.h @@ -15,7 +15,6 @@ class ConnectWrap : public ReqWrap { ConnectWrap(Environment* env, v8::Local req_wrap_obj, AsyncWrap::ProviderType provider); - ~ConnectWrap(); size_t self_size() const override { return sizeof(*this); } }; diff --git a/src/connection_wrap.h b/src/connection_wrap.h index 096672efddaae2..afb168c614aa97 100644 --- a/src/connection_wrap.h +++ b/src/connection_wrap.h @@ -29,7 +29,6 @@ class ConnectionWrap : public LibuvStreamWrap { UVType handle_; }; - } // namespace node #endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS diff --git a/src/env-inl.h b/src/env-inl.h index fa241f9706ec65..6202e50548a3ce 100644 --- a/src/env-inl.h +++ b/src/env-inl.h @@ -482,12 +482,12 @@ inline void Environment::set_http_parser_buffer_in_use(bool in_use) { http_parser_buffer_in_use_ = in_use; } -inline http2::http2_state* Environment::http2_state() const { +inline http2::Http2State* Environment::http2_state() const { return http2_state_.get(); } inline void Environment::set_http2_state( - std::unique_ptr buffer) { + std::unique_ptr buffer) { CHECK(!http2_state_); // Should be set only once. 
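The `http2_state` accessors above also show a small ownership convention used in `Environment`: the state object is owned through `std::unique_ptr`, handed over by value, and the setter insists it is installed exactly once. A minimal sketch of that convention (type and member names invented):

```cpp
#include <cassert>
#include <memory>
#include <utility>

struct Http2StateLike {
  int sessions = 0;  // Placeholder for the real per-Environment state.
};

class EnvironmentLike {
 public:
  Http2StateLike* http2_state() const { return http2_state_.get(); }

  // Takes ownership by value; asserts if called twice, mirroring the
  // CHECK(!http2_state_) "should be set only once" rule above.
  void set_http2_state(std::unique_ptr<Http2StateLike> state) {
    assert(!http2_state_);
    http2_state_ = std::move(state);
  }

 private:
  std::unique_ptr<Http2StateLike> http2_state_;
};

int main() {
  EnvironmentLike env;
  env.set_http2_state(std::make_unique<Http2StateLike>());
  env.http2_state()->sessions = 1;
  // Calling env.set_http2_state(...) a second time would trip the assert.
  return 0;
}
```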
http2_state_ = std::move(buffer); } @@ -583,9 +583,11 @@ inline void Environment::ThrowUVException(int errorno, inline v8::Local Environment::NewFunctionTemplate(v8::FunctionCallback callback, - v8::Local signature) { + v8::Local signature, + v8::ConstructorBehavior behavior) { v8::Local external = as_external(); - return v8::FunctionTemplate::New(isolate(), callback, external, signature); + return v8::FunctionTemplate::New(isolate(), callback, external, + signature, 0, behavior); } inline void Environment::SetMethod(v8::Local that, @@ -605,7 +607,8 @@ inline void Environment::SetProtoMethod(v8::Local that, const char* name, v8::FunctionCallback callback) { v8::Local signature = v8::Signature::New(isolate(), that); - v8::Local t = NewFunctionTemplate(callback, signature); + v8::Local t = + NewFunctionTemplate(callback, signature, v8::ConstructorBehavior::kThrow); // kInternalized strings are created in the old space. const v8::NewStringType type = v8::NewStringType::kInternalized; v8::Local name_string = diff --git a/src/env.h b/src/env.h index af4470ad8632fe..c0d79883d0ff1c 100644 --- a/src/env.h +++ b/src/env.h @@ -643,8 +643,8 @@ class Environment { inline bool http_parser_buffer_in_use() const; inline void set_http_parser_buffer_in_use(bool in_use); - inline http2::http2_state* http2_state() const; - inline void set_http2_state(std::unique_ptr state); + inline http2::Http2State* http2_state() const; + inline void set_http2_state(std::unique_ptr state); inline AliasedBuffer* fs_stats_field_array(); @@ -687,7 +687,9 @@ class Environment { inline v8::Local NewFunctionTemplate(v8::FunctionCallback callback, v8::Local signature = - v8::Local()); + v8::Local(), + v8::ConstructorBehavior behavior = + v8::ConstructorBehavior::kAllow); // Convenience methods for NewFunctionTemplate(). 
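`Environment::NewFunctionTemplate()` above gains a `behavior` parameter defaulted to `v8::ConstructorBehavior::kAllow`, so existing call sites compile unchanged, while `SetProtoMethod()` opts into `kThrow` so the resulting prototype methods reject being used as constructors. A plain sketch of that shape, with all types invented and no V8 involved:

```cpp
#include <cstdio>
#include <stdexcept>

enum class ConstructorBehavior { kAllow, kThrow };

// Toy stand-in for a function template: it can be "called" or "constructed".
class FunctionLike {
 public:
  explicit FunctionLike(ConstructorBehavior behavior) : behavior_(behavior) {}

  void Call() const { std::puts("called"); }

  void Construct() const {
    if (behavior_ == ConstructorBehavior::kThrow)
      throw std::runtime_error("not a constructor");
    std::puts("constructed");
  }

 private:
  ConstructorBehavior behavior_;
};

// The defaulted parameter keeps existing call sites source-compatible, the
// way Environment::NewFunctionTemplate() gains `behavior` above.
FunctionLike NewFunctionLike(
    ConstructorBehavior behavior = ConstructorBehavior::kAllow) {
  return FunctionLike(behavior);
}

int main() {
  NewFunctionLike().Construct();  // Old behavior: construction allowed.
  try {
    NewFunctionLike(ConstructorBehavior::kThrow).Construct();  // Proto method.
  } catch (const std::exception& e) {
    std::printf("rejected: %s\n", e.what());
  }
}
```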
inline void SetMethod(v8::Local that, @@ -829,7 +831,7 @@ class Environment { char* http_parser_buffer_; bool http_parser_buffer_in_use_ = false; - std::unique_ptr http2_state_; + std::unique_ptr http2_state_; AliasedBuffer fs_stats_field_array_; diff --git a/src/fs_event_wrap.cc b/src/fs_event_wrap.cc index 1e7505d6d3219a..ed74f36719db79 100644 --- a/src/fs_event_wrap.cc +++ b/src/fs_event_wrap.cc @@ -131,7 +131,7 @@ void FSEventWrap::New(const FunctionCallbackInfo& args) { void FSEventWrap::Start(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); - FSEventWrap* wrap = Unwrap(args.Holder()); + FSEventWrap* wrap = Unwrap(args.This()); CHECK_NE(wrap, nullptr); CHECK(!wrap->initialized_); diff --git a/src/handle_wrap.cc b/src/handle_wrap.cc index a3b0209eb3121f..49bf0c55bea0a1 100644 --- a/src/handle_wrap.cc +++ b/src/handle_wrap.cc @@ -93,7 +93,6 @@ HandleWrap::HandleWrap(Environment* env, handle_(handle) { handle_->data = this; HandleScope scope(env->isolate()); - Wrap(object, this); env->handle_wrap_queue()->PushBack(this); } @@ -114,7 +113,6 @@ void HandleWrap::OnClose(uv_handle_t* handle) { if (have_close_callback) wrap->MakeCallback(env->onclose_string(), 0, nullptr); - ClearWrap(wrap->object()); delete wrap; } diff --git a/src/inspector_js_api.cc b/src/inspector_js_api.cc index 37cdcecd61dabb..ae353defe8079d 100644 --- a/src/inspector_js_api.cc +++ b/src/inspector_js_api.cc @@ -64,7 +64,6 @@ class JSBindingsConnection : public AsyncWrap { Local callback) : AsyncWrap(env, wrap, PROVIDER_INSPECTORJSBINDING), callback_(env->isolate(), callback) { - Wrap(wrap, this); Agent* inspector = env->inspector_agent(); session_ = inspector->Connect(std::unique_ptr( new JSBindingsSessionDelegate(env, this))); @@ -83,9 +82,6 @@ class JSBindingsConnection : public AsyncWrap { void Disconnect() { session_.reset(); - if (!persistent().IsEmpty()) { - ClearWrap(object()); - } delete this; } diff --git a/src/js_stream.cc b/src/js_stream.cc index ed6c6ee738e568..2293d8cf203d07 100644 --- a/src/js_stream.cc +++ b/src/js_stream.cc @@ -24,12 +24,7 @@ using v8::Value; JSStream::JSStream(Environment* env, Local obj) : AsyncWrap(env, obj, AsyncWrap::PROVIDER_JSSTREAM), StreamBase(env) { - node::Wrap(obj, this); - MakeWeak(this); -} - - -JSStream::~JSStream() { + MakeWeak(); } diff --git a/src/js_stream.h b/src/js_stream.h index 338cbe545630f1..b47a91a653ba7e 100644 --- a/src/js_stream.h +++ b/src/js_stream.h @@ -16,8 +16,6 @@ class JSStream : public AsyncWrap, public StreamBase { v8::Local unused, v8::Local context); - ~JSStream(); - bool IsAlive() override; bool IsClosing() override; int ReadStart() override; diff --git a/src/module_wrap.cc b/src/module_wrap.cc index 9bcdb4dce75ff2..fcccbfe93cea32 100644 --- a/src/module_wrap.cc +++ b/src/module_wrap.cc @@ -158,7 +158,6 @@ void ModuleWrap::New(const FunctionCallbackInfo& args) { obj->context_.Reset(isolate, context); env->module_map.emplace(module->GetIdentityHash(), obj); - Wrap(that, obj); that->SetIntegrityLevel(context, IntegrityLevel::kFrozen); args.GetReturnValue().Set(that); diff --git a/src/node.cc b/src/node.cc index 0472e55a5a2e10..693457d3ed0aae 100644 --- a/src/node.cc +++ b/src/node.cc @@ -4439,6 +4439,11 @@ void FreeEnvironment(Environment* env) { } +MultiIsolatePlatform* GetMainThreadMultiIsolatePlatform() { + return v8_platform.Platform(); +} + + MultiIsolatePlatform* CreatePlatform( int thread_pool_size, v8::TracingController* tracing_controller) { diff --git a/src/node.h b/src/node.h index 
ab5d1c120fa007..5a491c1abf5457 100644 --- a/src/node.h +++ b/src/node.h @@ -251,6 +251,11 @@ NODE_EXTERN Environment* CreateEnvironment(IsolateData* isolate_data, NODE_EXTERN void LoadEnvironment(Environment* env); NODE_EXTERN void FreeEnvironment(Environment* env); +// This returns the MultiIsolatePlatform used in the main thread of Node.js. +// If NODE_USE_V8_PLATFORM haven't been defined when Node.js was built, +// it returns nullptr. +NODE_EXTERN MultiIsolatePlatform* GetMainThreadMultiIsolatePlatform(); + NODE_EXTERN MultiIsolatePlatform* CreatePlatform( int thread_pool_size, v8::TracingController* tracing_controller); diff --git a/src/node_contextify.cc b/src/node_contextify.cc index e07d5ebcd29d0d..6e7027a29ba8a0 100644 --- a/src/node_contextify.cc +++ b/src/node_contextify.cc @@ -120,7 +120,7 @@ Local ContextifyContext::CreateDataWrapper(Environment* env) { if (wrapper.IsEmpty()) return scope.Escape(Local::New(env->isolate(), Local())); - Wrap(wrapper, this); + wrapper->SetAlignedPointerInInternalField(0, this); return scope.Escape(wrapper); } @@ -290,12 +290,19 @@ ContextifyContext* ContextifyContext::ContextFromContextifiedSandbox( return nullptr; } +// static +template +ContextifyContext* ContextifyContext::Get(const PropertyCallbackInfo& args) { + Local data = args.Data(); + return static_cast( + data.As()->GetAlignedPointerFromInternalField(0)); +} + // static void ContextifyContext::PropertyGetterCallback( Local property, const PropertyCallbackInfo& args) { - ContextifyContext* ctx; - ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Data().As()); + ContextifyContext* ctx = ContextifyContext::Get(args); // Still initializing if (ctx->context_.IsEmpty()) @@ -324,8 +331,7 @@ void ContextifyContext::PropertySetterCallback( Local property, Local value, const PropertyCallbackInfo& args) { - ContextifyContext* ctx; - ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Data().As()); + ContextifyContext* ctx = ContextifyContext::Get(args); // Still initializing if (ctx->context_.IsEmpty()) @@ -385,8 +391,7 @@ void ContextifyContext::PropertySetterCallback( void ContextifyContext::PropertyDescriptorCallback( Local property, const PropertyCallbackInfo& args) { - ContextifyContext* ctx; - ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Data().As()); + ContextifyContext* ctx = ContextifyContext::Get(args); // Still initializing if (ctx->context_.IsEmpty()) @@ -408,8 +413,7 @@ void ContextifyContext::PropertyDefinerCallback( Local property, const PropertyDescriptor& desc, const PropertyCallbackInfo& args) { - ContextifyContext* ctx; - ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Data().As()); + ContextifyContext* ctx = ContextifyContext::Get(args); // Still initializing if (ctx->context_.IsEmpty()) @@ -471,8 +475,7 @@ void ContextifyContext::PropertyDefinerCallback( void ContextifyContext::PropertyDeleterCallback( Local property, const PropertyCallbackInfo& args) { - ContextifyContext* ctx; - ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Data().As()); + ContextifyContext* ctx = ContextifyContext::Get(args); // Still initializing if (ctx->context_.IsEmpty()) @@ -491,8 +494,7 @@ void ContextifyContext::PropertyDeleterCallback( // static void ContextifyContext::PropertyEnumeratorCallback( const PropertyCallbackInfo& args) { - ContextifyContext* ctx; - ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Data().As()); + ContextifyContext* ctx = ContextifyContext::Get(args); // Still initializing if (ctx->context_.IsEmpty()) @@ -505,8 +507,7 @@ void ContextifyContext::PropertyEnumeratorCallback( void ContextifyContext::IndexedPropertyGetterCallback( uint32_t index, const 
PropertyCallbackInfo& args) { - ContextifyContext* ctx; - ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Data().As()); + ContextifyContext* ctx = ContextifyContext::Get(args); // Still initializing if (ctx->context_.IsEmpty()) @@ -521,8 +522,7 @@ void ContextifyContext::IndexedPropertySetterCallback( uint32_t index, Local value, const PropertyCallbackInfo& args) { - ContextifyContext* ctx; - ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Data().As()); + ContextifyContext* ctx = ContextifyContext::Get(args); // Still initializing if (ctx->context_.IsEmpty()) @@ -536,8 +536,7 @@ void ContextifyContext::IndexedPropertySetterCallback( void ContextifyContext::IndexedPropertyDescriptorCallback( uint32_t index, const PropertyCallbackInfo& args) { - ContextifyContext* ctx; - ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Data().As()); + ContextifyContext* ctx = ContextifyContext::Get(args); // Still initializing if (ctx->context_.IsEmpty()) @@ -552,8 +551,7 @@ void ContextifyContext::IndexedPropertyDefinerCallback( uint32_t index, const PropertyDescriptor& desc, const PropertyCallbackInfo& args) { - ContextifyContext* ctx; - ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Data().As()); + ContextifyContext* ctx = ContextifyContext::Get(args); // Still initializing if (ctx->context_.IsEmpty()) @@ -567,8 +565,7 @@ void ContextifyContext::IndexedPropertyDefinerCallback( void ContextifyContext::IndexedPropertyDeleterCallback( uint32_t index, const PropertyCallbackInfo& args) { - ContextifyContext* ctx; - ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Data().As()); + ContextifyContext* ctx = ContextifyContext::Get(args); // Still initializing if (ctx->context_.IsEmpty()) @@ -887,7 +884,7 @@ class ContextifyScript : public BaseObject { ContextifyScript(Environment* env, Local object) : BaseObject(env, object) { - MakeWeak(this); + MakeWeak(); } }; diff --git a/src/node_contextify.h b/src/node_contextify.h index 565b8ef856ea49..70ce091af3b58e 100644 --- a/src/node_contextify.h +++ b/src/node_contextify.h @@ -55,6 +55,9 @@ class ContextifyContext { context()->GetEmbedderData(ContextEmbedderIndex::kSandboxObject)); } + template + static ContextifyContext* Get(const v8::PropertyCallbackInfo& args); + private: static void MakeContext(const v8::FunctionCallbackInfo& args); static void IsContext(const v8::FunctionCallbackInfo& args); diff --git a/src/node_crypto.cc b/src/node_crypto.cc index c195868033f968..c1d508c0ea6949 100644 --- a/src/node_crypto.cc +++ b/src/node_crypto.cc @@ -90,9 +90,21 @@ using v8::Value; struct StackOfX509Deleter { void operator()(STACK_OF(X509)* p) const { sk_X509_pop_free(p, X509_free); } }; - using StackOfX509 = std::unique_ptr; +struct StackOfXASN1Deleter { + void operator()(STACK_OF(ASN1_OBJECT)* p) const { + sk_ASN1_OBJECT_pop_free(p, ASN1_OBJECT_free); + } +}; +using StackOfASN1 = std::unique_ptr; + +// OPENSSL_free is a macro, so we need a wrapper function. +struct OpenSSLBufferDeleter { + void operator()(char* pointer) const { OPENSSL_free(pointer); } +}; +using OpenSSLBuffer = std::unique_ptr; + static const char* const root_certs[] = { #include "node_root_certs.h" // NOLINT(build/include_order) }; @@ -424,24 +436,24 @@ void SecureContext::Init(const FunctionCallbackInfo& args) { } } - sc->ctx_ = SSL_CTX_new(method); - SSL_CTX_set_app_data(sc->ctx_, sc); + sc->ctx_.reset(SSL_CTX_new(method)); + SSL_CTX_set_app_data(sc->ctx_.get(), sc); // Disable SSLv2 in the case when method == TLS_method() and the // cipher list contains SSLv2 ciphers (not the default, should be rare.) 
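The `StackOfX509Deleter`, `StackOfXASN1Deleter`, and `OpenSSLBufferDeleter` structs above, together with the `BIOPointer`/`X509Pointer`/`EVPKeyPointer` aliases used throughout the rest of this file (their definitions live outside this hunk), follow a single pattern: hold a C-style resource in a `std::unique_ptr` whose deleter calls the matching `*_free` function, so every early `return` path releases it. A self-contained sketch of the pattern using `std::fopen`/`std::malloc` instead of OpenSSL types:

```cpp
#include <cstdio>
#include <cstdlib>
#include <memory>

// Deleter types in the same style as StackOfX509Deleter / OpenSSLBufferDeleter:
// a small struct whose operator() calls the C library's free function.
struct FileDeleter {
  void operator()(std::FILE* f) const { if (f != nullptr) std::fclose(f); }
};
using FilePointer = std::unique_ptr<std::FILE, FileDeleter>;

struct MallocDeleter {
  void operator()(char* p) const { std::free(p); }
};
using MallocBuffer = std::unique_ptr<char, MallocDeleter>;

bool ReadFirstLine(const char* path) {
  FilePointer file(std::fopen(path, "r"));
  if (!file)
    return false;  // Early return: no fclose() call to forget.

  MallocBuffer line(static_cast<char*>(std::malloc(256)));
  if (!line || std::fgets(line.get(), 256, file.get()) == nullptr)
    return false;  // Both resources are released automatically.

  std::printf("%s", line.get());
  return true;     // ...and released here as well.
}

int main() {
  return ReadFirstLine("/etc/hostname") ? 0 : 1;
}
```

Compared with pairing every exit path with a manual `BIO_free_all()`/`EVP_PKEY_free()` call, this makes the early-error returns in functions such as `SetKey()`, `AddCRL()`, and `AddCertsFromFile()` leak-free by construction.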
// The bundled OpenSSL doesn't have SSLv2 support but the system OpenSSL may. // SSLv3 is disabled because it's susceptible to downgrade attacks (POODLE.) - SSL_CTX_set_options(sc->ctx_, SSL_OP_NO_SSLv2); - SSL_CTX_set_options(sc->ctx_, SSL_OP_NO_SSLv3); + SSL_CTX_set_options(sc->ctx_.get(), SSL_OP_NO_SSLv2); + SSL_CTX_set_options(sc->ctx_.get(), SSL_OP_NO_SSLv3); // SSL session cache configuration - SSL_CTX_set_session_cache_mode(sc->ctx_, + SSL_CTX_set_session_cache_mode(sc->ctx_.get(), SSL_SESS_CACHE_SERVER | SSL_SESS_CACHE_NO_INTERNAL | SSL_SESS_CACHE_NO_AUTO_CLEAR); - SSL_CTX_set_min_proto_version(sc->ctx_, min_version); - SSL_CTX_set_max_proto_version(sc->ctx_, max_version); + SSL_CTX_set_min_proto_version(sc->ctx_.get(), min_version); + SSL_CTX_set_max_proto_version(sc->ctx_.get(), max_version); // OpenSSL 1.1.0 changed the ticket key size, but the OpenSSL 1.0.x size was // exposed in the public API. To retain compatibility, install a callback // which restores the old algorithm. @@ -450,7 +462,7 @@ void SecureContext::Init(const FunctionCallbackInfo& args) { RAND_bytes(sc->ticket_key_aes_, sizeof(sc->ticket_key_aes_)) <= 0) { return env->ThrowError("Error generating ticket keys"); } - SSL_CTX_set_tlsext_ticket_key_cb(sc->ctx_, + SSL_CTX_set_tlsext_ticket_key_cb(sc->ctx_.get(), SecureContext::TicketCompatibilityCallback); } @@ -495,19 +507,19 @@ void SecureContext::SetKey(const FunctionCallbackInfo& args) { THROW_AND_RETURN_IF_NOT_STRING(env, args[1], "Pass phrase"); } - BIO *bio = LoadBIO(env, args[0]); + BIOPointer bio(LoadBIO(env, args[0])); if (!bio) return; node::Utf8Value passphrase(env->isolate(), args[1]); - EVP_PKEY* key = PEM_read_bio_PrivateKey(bio, - nullptr, - PasswordCallback, - len == 1 ? nullptr : *passphrase); + EVPKeyPointer key( + PEM_read_bio_PrivateKey(bio.get(), + nullptr, + PasswordCallback, + len == 1 ? 
nullptr : *passphrase)); if (!key) { - BIO_free_all(bio); unsigned long err = ERR_get_error(); // NOLINT(runtime/int) if (!err) { return env->ThrowError("PEM_read_bio_PrivateKey"); @@ -515,9 +527,7 @@ void SecureContext::SetKey(const FunctionCallbackInfo& args) { return ThrowCryptoError(env, err); } - int rv = SSL_CTX_use_PrivateKey(sc->ctx_, key); - EVP_PKEY_free(key); - BIO_free_all(bio); + int rv = SSL_CTX_use_PrivateKey(sc->ctx_.get(), key.get()); if (!rv) { unsigned long err = ERR_get_error(); // NOLINT(runtime/int) @@ -530,24 +540,24 @@ void SecureContext::SetKey(const FunctionCallbackInfo& args) { int SSL_CTX_get_issuer(SSL_CTX* ctx, X509* cert, X509** issuer) { X509_STORE* store = SSL_CTX_get_cert_store(ctx); - X509_STORE_CTX* store_ctx = X509_STORE_CTX_new(); - int ret = store_ctx != nullptr && - X509_STORE_CTX_init(store_ctx, store, nullptr, nullptr) == 1 && - X509_STORE_CTX_get1_issuer(issuer, store_ctx, cert) == 1; - X509_STORE_CTX_free(store_ctx); - return ret; + DeleteFnPtr store_ctx( + X509_STORE_CTX_new()); + return store_ctx.get() != nullptr && + X509_STORE_CTX_init(store_ctx.get(), store, nullptr, nullptr) == 1 && + X509_STORE_CTX_get1_issuer(issuer, store_ctx.get(), cert) == 1; } int SSL_CTX_use_certificate_chain(SSL_CTX* ctx, - X509* x, + X509Pointer&& x, STACK_OF(X509)* extra_certs, - X509** cert, - X509** issuer) { - CHECK_EQ(*issuer, nullptr); - CHECK_EQ(*cert, nullptr); + X509Pointer* cert, + X509Pointer* issuer_) { + CHECK(!*issuer_); + CHECK(!*cert); + X509* issuer = nullptr; - int ret = SSL_CTX_use_certificate(ctx, x); + int ret = SSL_CTX_use_certificate(ctx, x.get()); if (ret) { // If we could set up our certificate, now proceed to @@ -564,7 +574,7 @@ int SSL_CTX_use_certificate_chain(SSL_CTX* ctx, if (!r) { ret = 0; - *issuer = nullptr; + issuer = nullptr; goto end; } // Note that we must not free r if it was successfully @@ -573,24 +583,24 @@ int SSL_CTX_use_certificate_chain(SSL_CTX* ctx, // by SSL_CTX_use_certificate). // Find issuer - if (*issuer != nullptr || X509_check_issued(ca, x) != X509_V_OK) + if (issuer != nullptr || X509_check_issued(ca, x.get()) != X509_V_OK) continue; - *issuer = ca; + issuer = ca; } } // Try getting issuer from a cert store if (ret) { - if (*issuer == nullptr) { - ret = SSL_CTX_get_issuer(ctx, x, issuer); + if (issuer == nullptr) { + ret = SSL_CTX_get_issuer(ctx, x.get(), &issuer); ret = ret < 0 ? 0 : 1; // NOTE: get_cert_store doesn't increment reference count, // no need to free `store` } else { // Increment issuer reference count - *issuer = X509_dup(*issuer); - if (*issuer == nullptr) { + issuer = X509_dup(issuer); + if (issuer == nullptr) { ret = 0; goto end; } @@ -598,9 +608,11 @@ int SSL_CTX_use_certificate_chain(SSL_CTX* ctx, } end: + issuer_->reset(issuer); + if (ret && x != nullptr) { - *cert = X509_dup(x); - if (*cert == nullptr) + cert->reset(X509_dup(x.get())); + if (!*cert) ret = 0; } return ret; @@ -613,21 +625,20 @@ int SSL_CTX_use_certificate_chain(SSL_CTX* ctx, // // Taken from OpenSSL - edited for style. 
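The reworked `SSL_CTX_use_certificate_chain()` above changes its signature from raw `X509*`/`X509**` to an rvalue `X509Pointer&&` sink plus `X509Pointer*` out-parameters, making it explicit that the callee consumes the input certificate and hands back owned results. A reduced sketch of that signature shape, with invented types standing in for the OpenSSL ones:

```cpp
#include <cstdio>
#include <memory>
#include <string>
#include <utility>

using CertPointer = std::unique_ptr<std::string>;  // Stand-in for X509Pointer.

// Sink parameter (&&) documents that the callee consumes `input`;
// smart-pointer out-parameters hand ownership back to the caller.
bool UseCertificateChain(CertPointer&& input,
                         CertPointer* cert,
                         CertPointer* issuer) {
  if (!input)
    return false;
  issuer->reset(new std::string("issuer-of-" + *input));
  *cert = std::move(input);  // Take ownership of the consumed argument.
  return true;
}

int main() {
  CertPointer cert;
  CertPointer issuer;
  if (UseCertificateChain(CertPointer(new std::string("leaf")),
                          &cert, &issuer)) {
    std::printf("%s / %s\n", cert->c_str(), issuer->c_str());
  }
}
```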
int SSL_CTX_use_certificate_chain(SSL_CTX* ctx, - BIO* in, - X509** cert, - X509** issuer) { - X509* x = nullptr; - + BIOPointer&& in, + X509Pointer* cert, + X509Pointer* issuer) { // Just to ensure that `ERR_peek_last_error` below will return only errors // that we are interested in ERR_clear_error(); - x = PEM_read_bio_X509_AUX(in, nullptr, NoPasswordCallback, nullptr); + X509Pointer x( + PEM_read_bio_X509_AUX(in.get(), nullptr, NoPasswordCallback, nullptr)); - if (x == nullptr) { + if (!x) return 0; - } + // TODO(addaleax): Turn this into smart pointer as well. X509* extra = nullptr; int ret = 0; unsigned long err = 0; // NOLINT(runtime/int) @@ -636,7 +647,7 @@ int SSL_CTX_use_certificate_chain(SSL_CTX* ctx, if (!extra_certs) goto done; - while ((extra = PEM_read_bio_X509(in, + while ((extra = PEM_read_bio_X509(in.get(), nullptr, NoPasswordCallback, nullptr))) { @@ -658,15 +669,17 @@ int SSL_CTX_use_certificate_chain(SSL_CTX* ctx, goto done; } - ret = SSL_CTX_use_certificate_chain(ctx, x, extra_certs.get(), cert, issuer); + ret = SSL_CTX_use_certificate_chain(ctx, + std::move(x), + extra_certs.get(), + cert, + issuer); if (!ret) goto done; done: if (extra != nullptr) X509_free(extra); - if (x != nullptr) - X509_free(x); return ret; } @@ -682,27 +695,18 @@ void SecureContext::SetCert(const FunctionCallbackInfo& args) { return THROW_ERR_MISSING_ARGS(env, "Certificate argument is mandatory"); } - BIO* bio = LoadBIO(env, args[0]); + BIOPointer bio(LoadBIO(env, args[0])); if (!bio) return; - // Free previous certs - if (sc->issuer_ != nullptr) { - X509_free(sc->issuer_); - sc->issuer_ = nullptr; - } - if (sc->cert_ != nullptr) { - X509_free(sc->cert_); - sc->cert_ = nullptr; - } + sc->cert_.reset(); + sc->issuer_.reset(); - int rv = SSL_CTX_use_certificate_chain(sc->ctx_, - bio, + int rv = SSL_CTX_use_certificate_chain(sc->ctx_.get(), + std::move(bio), &sc->cert_, &sc->issuer_); - BIO_free_all(bio); - if (!rv) { unsigned long err = ERR_get_error(); // NOLINT(runtime/int) if (!err) { @@ -756,24 +760,21 @@ void SecureContext::AddCACert(const FunctionCallbackInfo& args) { return THROW_ERR_MISSING_ARGS(env, "CA certificate argument is mandatory"); } - BIO* bio = LoadBIO(env, args[0]); - if (!bio) { + BIOPointer bio(LoadBIO(env, args[0])); + if (!bio) return; - } - X509_STORE* cert_store = SSL_CTX_get_cert_store(sc->ctx_); - while (X509* x509 = - PEM_read_bio_X509(bio, nullptr, NoPasswordCallback, nullptr)) { + X509_STORE* cert_store = SSL_CTX_get_cert_store(sc->ctx_.get()); + while (X509* x509 = PEM_read_bio_X509( + bio.get(), nullptr, NoPasswordCallback, nullptr)) { if (cert_store == root_cert_store) { cert_store = NewRootCertStore(); - SSL_CTX_set_cert_store(sc->ctx_, cert_store); + SSL_CTX_set_cert_store(sc->ctx_.get(), cert_store); } X509_STORE_add_cert(cert_store, x509); - SSL_CTX_add_client_CA(sc->ctx_, x509); + SSL_CTX_add_client_CA(sc->ctx_.get(), x509); X509_free(x509); } - - BIO_free_all(bio); } @@ -789,30 +790,25 @@ void SecureContext::AddCRL(const FunctionCallbackInfo& args) { ClearErrorOnReturn clear_error_on_return; - BIO *bio = LoadBIO(env, args[0]); + BIOPointer bio(LoadBIO(env, args[0])); if (!bio) return; - X509_CRL* crl = - PEM_read_bio_X509_CRL(bio, nullptr, NoPasswordCallback, nullptr); + DeleteFnPtr crl( + PEM_read_bio_X509_CRL(bio.get(), nullptr, NoPasswordCallback, nullptr)); - if (crl == nullptr) { - BIO_free_all(bio); + if (!crl) return env->ThrowError("Failed to parse CRL"); - } - X509_STORE* cert_store = SSL_CTX_get_cert_store(sc->ctx_); + X509_STORE* cert_store = 
SSL_CTX_get_cert_store(sc->ctx_.get()); if (cert_store == root_cert_store) { cert_store = NewRootCertStore(); - SSL_CTX_set_cert_store(sc->ctx_, cert_store); + SSL_CTX_set_cert_store(sc->ctx_.get(), cert_store); } - X509_STORE_add_crl(cert_store, crl); + X509_STORE_add_crl(cert_store, crl.get()); X509_STORE_set_flags(cert_store, X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL); - - BIO_free_all(bio); - X509_CRL_free(crl); } @@ -827,17 +823,15 @@ static unsigned long AddCertsFromFile( // NOLINT(runtime/int) ERR_clear_error(); MarkPopErrorOnReturn mark_pop_error_on_return; - BIO* bio = BIO_new_file(file, "r"); - if (!bio) { + BIOPointer bio(BIO_new_file(file, "r")); + if (!bio) return ERR_get_error(); - } while (X509* x509 = - PEM_read_bio_X509(bio, nullptr, NoPasswordCallback, nullptr)) { + PEM_read_bio_X509(bio.get(), nullptr, NoPasswordCallback, nullptr)) { X509_STORE_add_cert(store, x509); X509_free(x509); } - BIO_free_all(bio); unsigned long err = ERR_peek_error(); // NOLINT(runtime/int) // Ignore error if its EOF/no start line found. @@ -876,7 +870,7 @@ void SecureContext::AddRootCerts(const FunctionCallbackInfo& args) { // Increment reference count so global store is not deleted along with CTX. X509_STORE_up_ref(root_cert_store); - SSL_CTX_set_cert_store(sc->ctx_, root_cert_store); + SSL_CTX_set_cert_store(sc->ctx_.get(), root_cert_store); } @@ -893,7 +887,7 @@ void SecureContext::SetCiphers(const FunctionCallbackInfo& args) { THROW_AND_RETURN_IF_NOT_STRING(env, args[0], "Ciphers"); const node::Utf8Value ciphers(args.GetIsolate(), args[0]); - SSL_CTX_set_cipher_list(sc->ctx_, *ciphers); + SSL_CTX_set_cipher_list(sc->ctx_.get(), *ciphers); } @@ -912,7 +906,7 @@ void SecureContext::SetECDHCurve(const FunctionCallbackInfo& args) { if (strcmp(*curve, "auto") == 0) return; - if (!SSL_CTX_set1_curves_list(sc->ctx_, *curve)) + if (!SSL_CTX_set1_curves_list(sc->ctx_.get(), *curve)) return env->ThrowError("Failed to set ECDH curve"); } @@ -928,19 +922,21 @@ void SecureContext::SetDHParam(const FunctionCallbackInfo& args) { if (args.Length() != 1) return THROW_ERR_MISSING_ARGS(env, "DH argument is mandatory"); - // Invalid dhparam is silently discarded and DHE is no longer used. - BIO* bio = LoadBIO(env, args[0]); - if (!bio) - return; + DHPointer dh; + { + BIOPointer bio(LoadBIO(env, args[0])); + if (!bio) + return; - DH* dh = PEM_read_bio_DHparams(bio, nullptr, nullptr, nullptr); - BIO_free_all(bio); + dh.reset(PEM_read_bio_DHparams(bio.get(), nullptr, nullptr, nullptr)); + } - if (dh == nullptr) + // Invalid dhparam is silently discarded and DHE is no longer used. 
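In the `SetDHParam()` hunk above, the `BIOPointer` is created inside a nested block so it is released as soon as `PEM_read_bio_DHparams()` has produced the longer-lived `DHPointer`. A sketch of that scoping idiom with invented types:

```cpp
#include <cstdio>
#include <memory>
#include <string>

using BufferPointer = std::unique_ptr<std::string>;  // Short-lived input.
using ParamsPointer = std::unique_ptr<int>;          // Longer-lived result.

ParamsPointer ParseParams(const BufferPointer& buffer) {
  if (!buffer || buffer->empty())
    return nullptr;
  return ParamsPointer(new int(static_cast<int>(buffer->size())));
}

int main() {
  ParamsPointer params;
  {
    // The "BIO" only needs to exist long enough to be parsed; the inner
    // scope releases it immediately afterwards.
    BufferPointer buffer(new std::string("dh parameters..."));
    params = ParseParams(buffer);
  }  // buffer freed here, params lives on.

  if (!params)
    return 1;  // Invalid input is silently discarded, as in SetDHParam().
  std::printf("parsed params of size %d\n", *params);
  return 0;
}
```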
+ if (!dh) return; const BIGNUM* p; - DH_get0_pqg(dh, &p, nullptr, nullptr); + DH_get0_pqg(dh.get(), &p, nullptr, nullptr); const int size = BN_num_bits(p); if (size < 1024) { return THROW_ERR_INVALID_ARG_VALUE( @@ -950,9 +946,8 @@ void SecureContext::SetDHParam(const FunctionCallbackInfo& args) { env->isolate(), "DH parameter is less than 2048 bits")); } - SSL_CTX_set_options(sc->ctx_, SSL_OP_SINGLE_DH_USE); - int r = SSL_CTX_set_tmp_dh(sc->ctx_, dh); - DH_free(dh); + SSL_CTX_set_options(sc->ctx_.get(), SSL_OP_SINGLE_DH_USE); + int r = SSL_CTX_set_tmp_dh(sc->ctx_.get(), dh.get()); if (!r) return env->ThrowTypeError("Error setting temp DH parameter"); @@ -969,7 +964,7 @@ void SecureContext::SetOptions(const FunctionCallbackInfo& args) { } SSL_CTX_set_options( - sc->ctx_, + sc->ctx_.get(), static_cast(args[0]->IntegerValue())); // NOLINT(runtime/int) } @@ -992,23 +987,21 @@ void SecureContext::SetSessionIdContext( reinterpret_cast(*sessionIdContext); unsigned int sid_ctx_len = sessionIdContext.length(); - int r = SSL_CTX_set_session_id_context(sc->ctx_, sid_ctx, sid_ctx_len); + int r = SSL_CTX_set_session_id_context(sc->ctx_.get(), sid_ctx, sid_ctx_len); if (r == 1) return; - BIO* bio; BUF_MEM* mem; Local message; - bio = BIO_new(BIO_s_mem()); - if (bio == nullptr) { + BIOPointer bio(BIO_new(BIO_s_mem())); + if (!bio) { message = FIXED_ONE_BYTE_STRING(args.GetIsolate(), "SSL_CTX_set_session_id_context error"); } else { - ERR_print_errors(bio); - BIO_get_mem_ptr(bio, &mem); + ERR_print_errors(bio.get()); + BIO_get_mem_ptr(bio.get(), &mem); message = OneByteString(args.GetIsolate(), mem->data, mem->length); - BIO_free_all(bio); } args.GetIsolate()->ThrowException(Exception::TypeError(message)); @@ -1025,14 +1018,14 @@ void SecureContext::SetSessionTimeout(const FunctionCallbackInfo& args) { } int32_t sessionTimeout = args[0]->Int32Value(); - SSL_CTX_set_timeout(sc->ctx_, sessionTimeout); + SSL_CTX_set_timeout(sc->ctx_.get(), sessionTimeout); } void SecureContext::Close(const FunctionCallbackInfo& args) { SecureContext* sc; ASSIGN_OR_RETURN_UNWRAP(&sc, args.Holder()); - sc->FreeCTXMem(); + sc->Reset(); } @@ -1040,12 +1033,7 @@ void SecureContext::Close(const FunctionCallbackInfo& args) { void SecureContext::LoadPKCS12(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); - BIO* in = nullptr; - PKCS12* p12 = nullptr; - EVP_PKEY* pkey = nullptr; - X509* cert = nullptr; - STACK_OF(X509)* extra_certs = nullptr; - char* pass = nullptr; + std::vector pass; bool ret = false; SecureContext* sc; @@ -1056,64 +1044,61 @@ void SecureContext::LoadPKCS12(const FunctionCallbackInfo& args) { return THROW_ERR_MISSING_ARGS(env, "PFX certificate argument is mandatory"); } - in = LoadBIO(env, args[0]); - if (in == nullptr) { + BIOPointer in(LoadBIO(env, args[0])); + if (!in) return env->ThrowError("Unable to load BIO"); - } if (args.Length() >= 2) { THROW_AND_RETURN_IF_NOT_BUFFER(env, args[1], "Pass phrase"); size_t passlen = Buffer::Length(args[1]); - pass = new char[passlen + 1]; - memcpy(pass, Buffer::Data(args[1]), passlen); + pass.resize(passlen + 1); + memcpy(pass.data(), Buffer::Data(args[1]), passlen); pass[passlen] = '\0'; } // Free previous certs - if (sc->issuer_ != nullptr) { - X509_free(sc->issuer_); - sc->issuer_ = nullptr; - } - if (sc->cert_ != nullptr) { - X509_free(sc->cert_); - sc->cert_ = nullptr; - } - - X509_STORE* cert_store = SSL_CTX_get_cert_store(sc->ctx_); - - if (d2i_PKCS12_bio(in, &p12) && - PKCS12_parse(p12, pass, &pkey, &cert, &extra_certs) && - 
SSL_CTX_use_certificate_chain(sc->ctx_, - cert, - extra_certs, + sc->issuer_.reset(); + sc->cert_.reset(); + + X509_STORE* cert_store = SSL_CTX_get_cert_store(sc->ctx_.get()); + + DeleteFnPtr p12; + EVPKeyPointer pkey; + X509Pointer cert; + StackOfX509 extra_certs; + + PKCS12* p12_ptr = nullptr; + EVP_PKEY* pkey_ptr = nullptr; + X509* cert_ptr = nullptr; + STACK_OF(X509)* extra_certs_ptr = nullptr; + if (d2i_PKCS12_bio(in.get(), &p12_ptr) && + (p12.reset(p12_ptr), true) && // Move ownership to the smart pointer. + PKCS12_parse(p12.get(), pass.data(), + &pkey_ptr, + &cert_ptr, + &extra_certs_ptr) && + (pkey.reset(pkey_ptr), cert.reset(cert_ptr), + extra_certs.reset(extra_certs_ptr), true) && // Move ownership. + SSL_CTX_use_certificate_chain(sc->ctx_.get(), + std::move(cert), + extra_certs.get(), &sc->cert_, &sc->issuer_) && - SSL_CTX_use_PrivateKey(sc->ctx_, pkey)) { + SSL_CTX_use_PrivateKey(sc->ctx_.get(), pkey.get())) { // Add CA certs too - for (int i = 0; i < sk_X509_num(extra_certs); i++) { - X509* ca = sk_X509_value(extra_certs, i); + for (int i = 0; i < sk_X509_num(extra_certs.get()); i++) { + X509* ca = sk_X509_value(extra_certs.get(), i); if (cert_store == root_cert_store) { cert_store = NewRootCertStore(); - SSL_CTX_set_cert_store(sc->ctx_, cert_store); + SSL_CTX_set_cert_store(sc->ctx_.get(), cert_store); } X509_STORE_add_cert(cert_store, ca); - SSL_CTX_add_client_CA(sc->ctx_, ca); + SSL_CTX_add_client_CA(sc->ctx_.get(), ca); } ret = true; } - if (pkey != nullptr) - EVP_PKEY_free(pkey); - if (cert != nullptr) - X509_free(cert); - if (extra_certs != nullptr) - sk_X509_pop_free(extra_certs, X509_free); - - PKCS12_free(p12); - BIO_free_all(in); - delete[] pass; - if (!ret) { unsigned long err = ERR_get_error(); // NOLINT(runtime/int) const char* str = ERR_reason_error_string(err); @@ -1123,6 +1108,9 @@ void SecureContext::LoadPKCS12(const FunctionCallbackInfo& args) { #ifndef OPENSSL_NO_ENGINE +// Helper for the smart pointer. +void ENGINE_free_fn(ENGINE* engine) { ENGINE_free(engine); } + void SecureContext::SetClientCertEngine( const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); @@ -1146,18 +1134,16 @@ void SecureContext::SetClientCertEngine( const node::Utf8Value engine_id(env->isolate(), args[0]); char errmsg[1024]; - ENGINE* engine = LoadEngineById(*engine_id, &errmsg); + DeleteFnPtr engine( + LoadEngineById(*engine_id, &errmsg)); - if (engine == nullptr) { + if (!engine) return env->ThrowError(errmsg); - } - int r = SSL_CTX_set_client_cert_engine(sc->ctx_, engine); - // Free reference (SSL_CTX_set_client_cert_engine took it via ENGINE_init). - ENGINE_free(engine); - if (r == 0) { + // Note that this takes another reference to `engine`. 
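The `LoadPKCS12()` rewrite above keeps OpenSSL's raw out-pointer API but adopts each raw pointer into a smart pointer the moment the producing call succeeds, using comma expressions such as `(p12.reset(p12_ptr), true)` inside the `&&` chain so that a failure at any later step still frees everything. A reduced sketch of that idiom with invented parse functions:

```cpp
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <memory>

struct FreeDeleter {
  void operator()(char* p) const { std::free(p); }
};
using Buffer = std::unique_ptr<char, FreeDeleter>;

// C-style "parsers" that hand back ownership through raw out-pointers.
bool ParseHeader(const char* input, char** out) {
  *out = static_cast<char*>(std::malloc(8));
  std::strncpy(*out, input, 7);
  (*out)[7] = '\0';
  return true;
}
bool ParseBody(const char* header, char** out) {
  if (std::strlen(header) == 0) return false;
  *out = static_cast<char*>(std::malloc(8));
  std::strcpy(*out, "body");
  return true;
}

bool Load(const char* input) {
  Buffer header, body;
  char* header_ptr = nullptr;
  char* body_ptr = nullptr;
  // Adopt each raw pointer immediately after the call that produced it, so
  // a `false` anywhere later in the chain cannot leak earlier allocations.
  bool ok = ParseHeader(input, &header_ptr) &&
            (header.reset(header_ptr), true) &&
            ParseBody(header.get(), &body_ptr) &&
            (body.reset(body_ptr), true);
  if (ok)
    std::printf("%s / %s\n", header.get(), body.get());
  return ok;
}

int main() {
  return Load("pkcs12!") ? 0 : 1;
}
```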
+ int r = SSL_CTX_set_client_cert_engine(sc->ctx_.get(), engine.get()); + if (r == 0) return ThrowCryptoError(env, ERR_get_error()); - } sc->client_cert_engine_provided_ = true; } #endif // !OPENSSL_NO_ENGINE @@ -1216,7 +1202,7 @@ void SecureContext::EnableTicketKeyCallback( SecureContext* wrap; ASSIGN_OR_RETURN_UNWRAP(&wrap, args.Holder()); - SSL_CTX_set_tlsext_ticket_key_cb(wrap->ctx_, TicketKeyCallback); + SSL_CTX_set_tlsext_ticket_key_cb(wrap->ctx_.get(), TicketKeyCallback); } @@ -1345,9 +1331,9 @@ void SecureContext::GetCertificate(const FunctionCallbackInfo& args) { X509* cert; if (primary) - cert = wrap->cert_; + cert = wrap->cert_.get(); else - cert = wrap->issuer_; + cert = wrap->issuer_.get(); if (cert == nullptr) return args.GetReturnValue().SetNull(); @@ -1399,8 +1385,8 @@ template void SSLWrap::ConfigureSecureContext(SecureContext* sc) { #ifdef NODE__HAVE_TLSEXT_STATUS_CB // OCSP stapling - SSL_CTX_set_tlsext_status_cb(sc->ctx_, TLSExtStatusCallback); - SSL_CTX_set_tlsext_status_arg(sc->ctx_, nullptr); + SSL_CTX_set_tlsext_status_cb(sc->ctx_.get(), TLSExtStatusCallback); + SSL_CTX_set_tlsext_status_arg(sc->ctx_.get(), nullptr); #endif // NODE__HAVE_TLSEXT_STATUS_CB } @@ -1413,10 +1399,7 @@ SSL_SESSION* SSLWrap::GetSessionCallback(SSL* s, Base* w = static_cast(SSL_get_app_data(s)); *copy = 0; - SSL_SESSION* sess = w->next_sess_; - w->next_sess_ = nullptr; - - return sess; + return w->next_sess_.release(); } @@ -1554,29 +1537,29 @@ static Local X509ToObject(Environment* env, X509* cert) { Local context = env->context(); Local info = Object::New(env->isolate()); - BIO* bio = BIO_new(BIO_s_mem()); + BIOPointer bio(BIO_new(BIO_s_mem())); BUF_MEM* mem; - if (X509_NAME_print_ex(bio, + if (X509_NAME_print_ex(bio.get(), X509_get_subject_name(cert), 0, X509_NAME_FLAGS) > 0) { - BIO_get_mem_ptr(bio, &mem); + BIO_get_mem_ptr(bio.get(), &mem); info->Set(context, env->subject_string(), String::NewFromUtf8(env->isolate(), mem->data, String::kNormalString, mem->length)).FromJust(); } - USE(BIO_reset(bio)); + USE(BIO_reset(bio.get())); X509_NAME* issuer_name = X509_get_issuer_name(cert); - if (X509_NAME_print_ex(bio, issuer_name, 0, X509_NAME_FLAGS) > 0) { - BIO_get_mem_ptr(bio, &mem); + if (X509_NAME_print_ex(bio.get(), issuer_name, 0, X509_NAME_FLAGS) > 0) { + BIO_get_mem_ptr(bio.get(), &mem); info->Set(context, env->issuer_string(), String::NewFromUtf8(env->isolate(), mem->data, String::kNormalString, mem->length)).FromJust(); } - USE(BIO_reset(bio)); + USE(BIO_reset(bio.get())); int nids[] = { NID_subject_alt_name, NID_info_access }; Local keys[] = { env->subjectaltname_string(), @@ -1593,85 +1576,79 @@ static Local X509ToObject(Environment* env, X509* cert) { ext = X509_get_ext(cert, index); CHECK_NE(ext, nullptr); - if (!SafeX509ExtPrint(bio, ext)) { - rv = X509V3_EXT_print(bio, ext, 0, 0); + if (!SafeX509ExtPrint(bio.get(), ext)) { + rv = X509V3_EXT_print(bio.get(), ext, 0, 0); CHECK_EQ(rv, 1); } - BIO_get_mem_ptr(bio, &mem); + BIO_get_mem_ptr(bio.get(), &mem); info->Set(context, keys[i], String::NewFromUtf8(env->isolate(), mem->data, String::kNormalString, mem->length)).FromJust(); - USE(BIO_reset(bio)); + USE(BIO_reset(bio.get())); } - EVP_PKEY* pkey = X509_get_pubkey(cert); - RSA* rsa = nullptr; - if (pkey != nullptr) - rsa = EVP_PKEY_get1_RSA(pkey); + EVPKeyPointer pkey(X509_get_pubkey(cert)); + RSAPointer rsa; + if (pkey) + rsa.reset(EVP_PKEY_get1_RSA(pkey.get())); - if (rsa != nullptr) { + if (rsa) { const BIGNUM* n; const BIGNUM* e; - RSA_get0_key(rsa, &n, &e, nullptr); - 
BN_print(bio, n); - BIO_get_mem_ptr(bio, &mem); + RSA_get0_key(rsa.get(), &n, &e, nullptr); + BN_print(bio.get(), n); + BIO_get_mem_ptr(bio.get(), &mem); info->Set(context, env->modulus_string(), String::NewFromUtf8(env->isolate(), mem->data, String::kNormalString, mem->length)).FromJust(); - USE(BIO_reset(bio)); + USE(BIO_reset(bio.get())); uint64_t exponent_word = static_cast(BN_get_word(e)); uint32_t lo = static_cast(exponent_word); uint32_t hi = static_cast(exponent_word >> 32); if (hi == 0) { - BIO_printf(bio, "0x%x", lo); + BIO_printf(bio.get(), "0x%x", lo); } else { - BIO_printf(bio, "0x%x%08x", hi, lo); + BIO_printf(bio.get(), "0x%x%08x", hi, lo); } - BIO_get_mem_ptr(bio, &mem); + BIO_get_mem_ptr(bio.get(), &mem); info->Set(context, env->exponent_string(), String::NewFromUtf8(env->isolate(), mem->data, String::kNormalString, mem->length)).FromJust(); - USE(BIO_reset(bio)); + USE(BIO_reset(bio.get())); - int size = i2d_RSA_PUBKEY(rsa, nullptr); + int size = i2d_RSA_PUBKEY(rsa.get(), nullptr); CHECK_GE(size, 0); Local pubbuff = Buffer::New(env, size).ToLocalChecked(); unsigned char* pubserialized = reinterpret_cast(Buffer::Data(pubbuff)); - i2d_RSA_PUBKEY(rsa, &pubserialized); + i2d_RSA_PUBKEY(rsa.get(), &pubserialized); info->Set(env->pubkey_string(), pubbuff); } - if (pkey != nullptr) { - EVP_PKEY_free(pkey); - pkey = nullptr; - } - if (rsa != nullptr) { - RSA_free(rsa); - rsa = nullptr; - } + pkey.reset(); + rsa.reset(); - ASN1_TIME_print(bio, X509_get_notBefore(cert)); - BIO_get_mem_ptr(bio, &mem); + ASN1_TIME_print(bio.get(), X509_get_notBefore(cert)); + BIO_get_mem_ptr(bio.get(), &mem); info->Set(context, env->valid_from_string(), String::NewFromUtf8(env->isolate(), mem->data, String::kNormalString, mem->length)).FromJust(); - USE(BIO_reset(bio)); + USE(BIO_reset(bio.get())); - ASN1_TIME_print(bio, X509_get_notAfter(cert)); - BIO_get_mem_ptr(bio, &mem); + ASN1_TIME_print(bio.get(), X509_get_notAfter(cert)); + BIO_get_mem_ptr(bio.get(), &mem); info->Set(context, env->valid_to_string(), String::NewFromUtf8(env->isolate(), mem->data, String::kNormalString, mem->length)).FromJust(); - BIO_free_all(bio); + bio.reset(); unsigned char md[EVP_MAX_MD_SIZE]; unsigned int md_size; @@ -1687,32 +1664,35 @@ static Local X509ToObject(Environment* env, X509* cert) { OneByteString(env->isolate(), fingerprint)).FromJust(); } - STACK_OF(ASN1_OBJECT)* eku = static_cast( - X509_get_ext_d2i(cert, NID_ext_key_usage, nullptr, nullptr)); - if (eku != nullptr) { + StackOfASN1 eku(static_cast( + X509_get_ext_d2i(cert, NID_ext_key_usage, nullptr, nullptr))); + if (eku) { Local ext_key_usage = Array::New(env->isolate()); char buf[256]; int j = 0; - for (int i = 0; i < sk_ASN1_OBJECT_num(eku); i++) { - if (OBJ_obj2txt(buf, sizeof(buf), sk_ASN1_OBJECT_value(eku, i), 1) >= 0) + for (int i = 0; i < sk_ASN1_OBJECT_num(eku.get()); i++) { + if (OBJ_obj2txt(buf, + sizeof(buf), + sk_ASN1_OBJECT_value(eku.get(), i), 1) >= 0) { ext_key_usage->Set(context, j++, OneByteString(env->isolate(), buf)).FromJust(); + } } - sk_ASN1_OBJECT_pop_free(eku, ASN1_OBJECT_free); + eku.reset(); info->Set(context, env->ext_key_usage_string(), ext_key_usage).FromJust(); } if (ASN1_INTEGER* serial_number = X509_get_serialNumber(cert)) { - if (BIGNUM* bn = ASN1_INTEGER_to_BN(serial_number, nullptr)) { - if (char* buf = BN_bn2hex(bn)) { + BignumPointer bn(ASN1_INTEGER_to_BN(serial_number, nullptr)); + if (bn) { + OpenSSLBuffer buf(BN_bn2hex(bn.get())); + if (buf) { info->Set(context, env->serial_number_string(), - 
OneByteString(env->isolate(), buf)).FromJust(); - OPENSSL_free(buf); + OneByteString(env->isolate(), buf.get())).FromJust(); } - BN_free(bn); } } @@ -1728,17 +1708,17 @@ static Local X509ToObject(Environment* env, X509* cert) { } -static Local AddIssuerChainToObject(X509** cert, +static Local AddIssuerChainToObject(X509Pointer* cert, Local object, - StackOfX509 peer_certs, + StackOfX509&& peer_certs, Environment* const env) { Local context = env->isolate()->GetCurrentContext(); - *cert = sk_X509_delete(peer_certs.get(), 0); + cert->reset(sk_X509_delete(peer_certs.get(), 0)); for (;;) { int i; for (i = 0; i < sk_X509_num(peer_certs.get()); i++) { X509* ca = sk_X509_value(peer_certs.get(), i); - if (X509_check_issued(ca, *cert) != X509_V_OK) + if (X509_check_issued(ca, cert->get()) != X509_V_OK) continue; Local ca_info = X509ToObject(env, ca); @@ -1746,10 +1726,8 @@ static Local AddIssuerChainToObject(X509** cert, object = ca_info; // NOTE: Intentionally freeing cert that is not used anymore. - X509_free(*cert); - // Delete cert and continue aggregating issuers. - *cert = sk_X509_delete(peer_certs.get(), i); + cert->reset(sk_X509_delete(peer_certs.get(), i)); break; } @@ -1761,41 +1739,38 @@ static Local AddIssuerChainToObject(X509** cert, } -static StackOfX509 CloneSSLCerts(X509** cert, +static StackOfX509 CloneSSLCerts(X509Pointer&& cert, const STACK_OF(X509)* const ssl_certs) { StackOfX509 peer_certs(sk_X509_new(nullptr)); - if (*cert != nullptr) - sk_X509_push(peer_certs.get(), *cert); + if (cert) + sk_X509_push(peer_certs.get(), cert.release()); for (int i = 0; i < sk_X509_num(ssl_certs); i++) { - *cert = X509_dup(sk_X509_value(ssl_certs, i)); - if (*cert == nullptr) - return StackOfX509(); - if (!sk_X509_push(peer_certs.get(), *cert)) + X509Pointer cert(X509_dup(sk_X509_value(ssl_certs, i))); + if (!cert || !sk_X509_push(peer_certs.get(), cert.get())) return StackOfX509(); + // `cert` is now managed by the stack. + cert.release(); } return peer_certs; } -static Local GetLastIssuedCert(X509** cert, - const SSL* const ssl, +static Local GetLastIssuedCert(X509Pointer* cert, + const SSLPointer& ssl, Local issuer_chain, Environment* const env) { Local context = env->isolate()->GetCurrentContext(); - while (X509_check_issued(*cert, *cert) != X509_V_OK) { + while (X509_check_issued(cert->get(), cert->get()) != X509_V_OK) { X509* ca; - if (SSL_CTX_get_issuer(SSL_get_SSL_CTX(ssl), *cert, &ca) <= 0) + if (SSL_CTX_get_issuer(SSL_get_SSL_CTX(ssl.get()), cert->get(), &ca) <= 0) break; Local ca_info = X509ToObject(env, ca); issuer_chain->Set(context, env->issuercert_string(), ca_info).FromJust(); issuer_chain = ca_info; - // NOTE: Intentionally freeing cert that is not used anymore. - X509_free(*cert); - - // Delete cert and continue aggregating issuers. - *cert = ca; + // Delete previous cert and continue aggregating issuers. + cert->reset(ca); } return issuer_chain; } @@ -1816,40 +1791,35 @@ void SSLWrap::GetPeerCertificate( // NOTE: This is because of the odd OpenSSL behavior. On client `cert_chain` // contains the `peer_certificate`, but on server it doesn't. - X509* cert = w->is_server() ? SSL_get_peer_certificate(w->ssl_) : nullptr; - STACK_OF(X509)* ssl_certs = SSL_get_peer_cert_chain(w->ssl_); - if (cert == nullptr && (ssl_certs == nullptr || sk_X509_num(ssl_certs) == 0)) + X509Pointer cert( + w->is_server() ? 
SSL_get_peer_certificate(w->ssl_.get()) : nullptr); + STACK_OF(X509)* ssl_certs = SSL_get_peer_cert_chain(w->ssl_.get()); + if (!cert && (ssl_certs == nullptr || sk_X509_num(ssl_certs) == 0)) goto done; // Short result requested. if (args.Length() < 1 || !args[0]->IsTrue()) { - X509* target_cert = cert; - if (target_cert == nullptr) - target_cert = sk_X509_value(ssl_certs, 0); - result = X509ToObject(env, target_cert); + result = X509ToObject(env, cert ? cert.get() : sk_X509_value(ssl_certs, 0)); goto done; } - if (auto peer_certs = CloneSSLCerts(&cert, ssl_certs)) { + if (auto peer_certs = CloneSSLCerts(std::move(cert), ssl_certs)) { // First and main certificate. - cert = sk_X509_value(peer_certs.get(), 0); - result = X509ToObject(env, cert); + X509Pointer cert(sk_X509_value(peer_certs.get(), 0)); + CHECK(cert); + result = X509ToObject(env, cert.release()); issuer_chain = AddIssuerChainToObject(&cert, result, std::move(peer_certs), env); issuer_chain = GetLastIssuedCert(&cert, w->ssl_, issuer_chain, env); // Last certificate should be self-signed. - if (X509_check_issued(cert, cert) == X509_V_OK) + if (X509_check_issued(cert.get(), cert.get()) == X509_V_OK) issuer_chain->Set(env->context(), env->issuercert_string(), issuer_chain).FromJust(); - - CHECK_NE(cert, nullptr); } done: - if (cert != nullptr) - X509_free(cert); if (result.IsEmpty()) result = Object::New(env->isolate()); args.GetReturnValue().Set(result); @@ -1869,12 +1839,12 @@ void SSLWrap::GetFinished(const FunctionCallbackInfo& args) { // sections 7.21.2.1, 7.21.1.2, and 7.1.4, would be violated. // Thus, we use a dummy byte. char dummy[1]; - size_t len = SSL_get_finished(w->ssl_, dummy, sizeof dummy); + size_t len = SSL_get_finished(w->ssl_.get(), dummy, sizeof dummy); if (len == 0) return; char* buf = Malloc(len); - CHECK_EQ(len, SSL_get_finished(w->ssl_, buf, len)); + CHECK_EQ(len, SSL_get_finished(w->ssl_.get(), buf, len)); args.GetReturnValue().Set(Buffer::New(env, buf, len).ToLocalChecked()); } @@ -1892,12 +1862,12 @@ void SSLWrap::GetPeerFinished(const FunctionCallbackInfo& args) { // sections 7.21.2.1, 7.21.1.2, and 7.1.4, would be violated. // Thus, we use a dummy byte. 
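`GetFinished()`/`GetPeerFinished()` here use a two-step pattern: call once with a 1-byte dummy buffer just to learn the length (a null buffer would not be legal, per the comment), then allocate the real buffer and call again. A standalone sketch of that calling pattern against an invented C-style API:

```cpp
#include <cstdio>
#include <cstring>
#include <vector>

// Invented stand-in for SSL_get_finished(): copies up to `count` bytes of the
// message into `buf` and always returns the full message length.
std::size_t get_finished_like(const char* state, void* buf, std::size_t count) {
  std::size_t len = std::strlen(state);
  std::memcpy(buf, state, count < len ? count : len);  // buf must not be null.
  return len;
}

int main() {
  const char* state = "finished-message";

  // Step 1: a 1-byte dummy buffer, used only to learn the real length.
  char dummy[1];
  std::size_t len = get_finished_like(state, dummy, sizeof(dummy));
  if (len == 0)
    return 0;

  // Step 2: allocate the right amount and fetch the whole message.
  std::vector<char> buf(len);
  std::size_t copied = get_finished_like(state, buf.data(), buf.size());
  std::printf("copied %zu of %zu bytes\n", copied, len);
  return 0;
}
```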
char dummy[1]; - size_t len = SSL_get_peer_finished(w->ssl_, dummy, sizeof dummy); + size_t len = SSL_get_peer_finished(w->ssl_.get(), dummy, sizeof dummy); if (len == 0) return; char* buf = Malloc(len); - CHECK_EQ(len, SSL_get_peer_finished(w->ssl_, buf, len)); + CHECK_EQ(len, SSL_get_peer_finished(w->ssl_.get(), buf, len)); args.GetReturnValue().Set(Buffer::New(env, buf, len).ToLocalChecked()); } @@ -1909,7 +1879,7 @@ void SSLWrap::GetSession(const FunctionCallbackInfo& args) { Base* w; ASSIGN_OR_RETURN_UNWRAP(&w, args.Holder()); - SSL_SESSION* sess = SSL_get_session(w->ssl_); + SSL_SESSION* sess = SSL_get_session(w->ssl_.get()); if (sess == nullptr) return; @@ -1936,19 +1906,18 @@ void SSLWrap::SetSession(const FunctionCallbackInfo& args) { THROW_AND_RETURN_IF_NOT_BUFFER(env, args[0], "Session"); size_t slen = Buffer::Length(args[0]); - char* sbuf = new char[slen]; - memcpy(sbuf, Buffer::Data(args[0]), slen); + std::vector sbuf(slen); + if (char* p = Buffer::Data(args[0])) + sbuf.assign(p, p + slen); - const unsigned char* p = reinterpret_cast(sbuf); - SSL_SESSION* sess = d2i_SSL_SESSION(nullptr, &p, slen); - - delete[] sbuf; + const unsigned char* p = reinterpret_cast(sbuf.data()); + SSLSessionPointer sess( + d2i_SSL_SESSION(nullptr, &p, slen)); if (sess == nullptr) return; - int r = SSL_set_session(w->ssl_, sess); - SSL_SESSION_free(sess); + int r = SSL_set_session(w->ssl_.get(), sess.get()); if (!r) return env->ThrowError("SSL_set_session error"); @@ -1968,9 +1937,7 @@ void SSLWrap::LoadSession(const FunctionCallbackInfo& args) { SSL_SESSION* sess = d2i_SSL_SESSION(nullptr, &p, slen); // Setup next session and move hello to the BIO buffer - if (w->next_sess_ != nullptr) - SSL_SESSION_free(w->next_sess_); - w->next_sess_ = sess; + w->next_sess_.reset(sess); } } @@ -1979,7 +1946,7 @@ template void SSLWrap::IsSessionReused(const FunctionCallbackInfo& args) { Base* w; ASSIGN_OR_RETURN_UNWRAP(&w, args.Holder()); - bool yes = SSL_session_reused(w->ssl_); + bool yes = SSL_session_reused(w->ssl_.get()); args.GetReturnValue().Set(yes); } @@ -1999,7 +1966,7 @@ void SSLWrap::Renegotiate(const FunctionCallbackInfo& args) { ClearErrorOnReturn clear_error_on_return; - bool yes = SSL_renegotiate(w->ssl_) == 1; + bool yes = SSL_renegotiate(w->ssl_.get()) == 1; args.GetReturnValue().Set(yes); } @@ -2009,7 +1976,7 @@ void SSLWrap::Shutdown(const FunctionCallbackInfo& args) { Base* w; ASSIGN_OR_RETURN_UNWRAP(&w, args.Holder()); - int rv = SSL_shutdown(w->ssl_); + int rv = SSL_shutdown(w->ssl_.get()); args.GetReturnValue().Set(rv); } @@ -2020,7 +1987,7 @@ void SSLWrap::GetTLSTicket(const FunctionCallbackInfo& args) { ASSIGN_OR_RETURN_UNWRAP(&w, args.Holder()); Environment* env = w->ssl_env(); - SSL_SESSION* sess = SSL_get_session(w->ssl_); + SSL_SESSION* sess = SSL_get_session(w->ssl_.get()); if (sess == nullptr) return; @@ -2072,7 +2039,7 @@ void SSLWrap::RequestOCSP( Base* w; ASSIGN_OR_RETURN_UNWRAP(&w, args.Holder()); - SSL_set_tlsext_status_type(w->ssl_, TLSEXT_STATUSTYPE_ocsp); + SSL_set_tlsext_status_type(w->ssl_.get(), TLSEXT_STATUSTYPE_ocsp); #endif // NODE__HAVE_TLSEXT_STATUS_CB } @@ -2085,7 +2052,7 @@ void SSLWrap::GetEphemeralKeyInfo( Environment* env = Environment::GetCurrent(args); Local context = env->context(); - CHECK_NE(w->ssl_, nullptr); + CHECK(w->ssl_); // tmp key is available on only client if (w->is_server()) @@ -2095,7 +2062,7 @@ void SSLWrap::GetEphemeralKeyInfo( EVP_PKEY* key; - if (SSL_get_server_tmp_key(w->ssl_, &key)) { + if (SSL_get_server_tmp_key(w->ssl_.get(), &key)) { int 
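The SetSession() hunk above parses the serialized session into an `SSLSessionPointer` instead of pairing `d2i_SSL_SESSION()` with a manual `SSL_SESSION_free()`. A minimal standalone sketch of the same flow, with the alias spelled out locally; `SSL_set_session()` keeps its own reference, so dropping ours afterwards is safe:

```c++
// Sketch: parse a serialized SSL_SESSION and install it on an SSL handle.
// The unique_ptr frees our reference on every path; SSL_set_session()
// retains its own reference internally.
#include <memory>
#include <openssl/ssl.h>

using SSLSessionPointer =
    std::unique_ptr<SSL_SESSION, decltype(&SSL_SESSION_free)>;

bool SetSerializedSession(SSL* ssl, const unsigned char* buf, long len) {
  const unsigned char* p = buf;  // d2i_* advances this cursor
  SSLSessionPointer sess(d2i_SSL_SESSION(nullptr, &p, len), SSL_SESSION_free);
  if (!sess)
    return false;
  return SSL_set_session(ssl, sess.get()) == 1;
}
```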
kid = EVP_PKEY_id(key); switch (kid) { case EVP_PKEY_DH: @@ -2145,7 +2112,9 @@ void SSLWrap::SetMaxSendFragment( Base* w; ASSIGN_OR_RETURN_UNWRAP(&w, args.Holder()); - int rv = SSL_set_max_send_fragment(w->ssl_, args[0]->Int32Value()); + int rv = SSL_set_max_send_fragment( + w->ssl_.get(), + args[0]->Int32Value(w->ssl_env()->context()).FromJust()); args.GetReturnValue().Set(rv); } #endif // SSL_set_max_send_fragment @@ -2155,7 +2124,7 @@ template void SSLWrap::IsInitFinished(const FunctionCallbackInfo& args) { Base* w; ASSIGN_OR_RETURN_UNWRAP(&w, args.Holder()); - bool yes = SSL_is_init_finished(w->ssl_); + bool yes = SSL_is_init_finished(w->ssl_.get()); args.GetReturnValue().Set(yes); } @@ -2170,9 +2139,9 @@ void SSLWrap::VerifyError(const FunctionCallbackInfo& args) { // here before. long x509_verify_error = // NOLINT(runtime/int) X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT; - if (X509* peer_cert = SSL_get_peer_certificate(w->ssl_)) { + if (X509* peer_cert = SSL_get_peer_certificate(w->ssl_.get())) { X509_free(peer_cert); - x509_verify_error = SSL_get_verify_result(w->ssl_); + x509_verify_error = SSL_get_verify_result(w->ssl_.get()); } if (x509_verify_error == X509_V_OK) @@ -2232,7 +2201,7 @@ void SSLWrap::GetCurrentCipher(const FunctionCallbackInfo& args) { Environment* env = w->ssl_env(); Local context = env->context(); - const SSL_CIPHER* c = SSL_get_current_cipher(w->ssl_); + const SSL_CIPHER* c = SSL_get_current_cipher(w->ssl_.get()); if (c == nullptr) return; @@ -2251,7 +2220,7 @@ void SSLWrap::GetProtocol(const FunctionCallbackInfo& args) { Base* w; ASSIGN_OR_RETURN_UNWRAP(&w, args.Holder()); - const char* tls_version = SSL_get_version(w->ssl_); + const char* tls_version = SSL_get_version(w->ssl_.get()); args.GetReturnValue().Set(OneByteString(args.GetIsolate(), tls_version)); } @@ -2299,7 +2268,7 @@ void SSLWrap::GetALPNNegotiatedProto( const unsigned char* alpn_proto; unsigned int alpn_proto_len; - SSL_get0_alpn_selected(w->ssl_, &alpn_proto, &alpn_proto_len); + SSL_get0_alpn_selected(w->ssl_.get(), &alpn_proto, &alpn_proto_len); if (!alpn_proto) return args.GetReturnValue().Set(false); @@ -2324,16 +2293,17 @@ void SSLWrap::SetALPNProtocols( const unsigned char* alpn_protos = reinterpret_cast(Buffer::Data(args[0])); unsigned alpn_protos_len = Buffer::Length(args[0]); - int r = SSL_set_alpn_protos(w->ssl_, alpn_protos, alpn_protos_len); + int r = SSL_set_alpn_protos(w->ssl_.get(), alpn_protos, alpn_protos_len); CHECK_EQ(r, 0); } else { CHECK( w->object()->SetPrivate( - env->context(), - env->alpn_buffer_private_symbol(), - args[0]).FromJust()); + env->context(), + env->alpn_buffer_private_symbol(), + args[0]).FromJust()); // Server should select ALPN protocol from list of advertised by client - SSL_CTX_set_alpn_select_cb(SSL_get_SSL_CTX(w->ssl_), SelectALPNCallback, + SSL_CTX_set_alpn_select_cb(SSL_get_SSL_CTX(w->ssl_.get()), + SelectALPNCallback, nullptr); } #endif // TLSEXT_TYPE_application_layer_protocol_negotiation @@ -2469,17 +2439,17 @@ void SSLWrap::CertCbDone(const FunctionCallbackInfo& args) { int rv; // NOTE: reference count is not increased by this API methods - X509* x509 = SSL_CTX_get0_certificate(sc->ctx_); - EVP_PKEY* pkey = SSL_CTX_get0_privatekey(sc->ctx_); + X509* x509 = SSL_CTX_get0_certificate(sc->ctx_.get()); + EVP_PKEY* pkey = SSL_CTX_get0_privatekey(sc->ctx_.get()); STACK_OF(X509)* chain; - rv = SSL_CTX_get0_chain_certs(sc->ctx_, &chain); + rv = SSL_CTX_get0_chain_certs(sc->ctx_.get(), &chain); if (rv) - rv = SSL_use_certificate(w->ssl_, x509); + rv = 
SSL_use_certificate(w->ssl_.get(), x509); if (rv) - rv = SSL_use_PrivateKey(w->ssl_, pkey); + rv = SSL_use_PrivateKey(w->ssl_.get(), pkey); if (rv && chain != nullptr) - rv = SSL_set1_chain(w->ssl_, chain); + rv = SSL_set1_chain(w->ssl_.get(), chain); if (rv) rv = w->SetCACerts(sc); if (!rv) { @@ -2512,19 +2482,18 @@ void SSLWrap::CertCbDone(const FunctionCallbackInfo& args) { template void SSLWrap::DestroySSL() { - if (ssl_ == nullptr) + if (!ssl_) return; - SSL_free(ssl_); env_->isolate()->AdjustAmountOfExternalAllocatedMemory(-kExternalSize); - ssl_ = nullptr; + ssl_.reset(); } template void SSLWrap::SetSNIContext(SecureContext* sc) { ConfigureSecureContext(sc); - CHECK_EQ(SSL_set_SSL_CTX(ssl_, sc->ctx_), sc->ctx_); + CHECK_EQ(SSL_set_SSL_CTX(ssl_.get(), sc->ctx_.get()), sc->ctx_.get()); SetCACerts(sc); } @@ -2532,15 +2501,16 @@ void SSLWrap::SetSNIContext(SecureContext* sc) { template int SSLWrap::SetCACerts(SecureContext* sc) { - int err = SSL_set1_verify_cert_store(ssl_, SSL_CTX_get_cert_store(sc->ctx_)); + int err = SSL_set1_verify_cert_store(ssl_.get(), + SSL_CTX_get_cert_store(sc->ctx_.get())); if (err != 1) return err; STACK_OF(X509_NAME)* list = SSL_dup_CA_list( - SSL_CTX_get_client_CA_list(sc->ctx_)); + SSL_CTX_get_client_CA_list(sc->ctx_.get())); // NOTE: `SSL_set_client_CA_list` takes the ownership of `list` - SSL_set_client_CA_list(ssl_, list); + SSL_set_client_CA_list(ssl_.get(), list); return 1; } @@ -2629,11 +2599,10 @@ void CipherBase::Init(const char* cipher_type, } #endif // NODE_FIPS_MODE - CHECK_EQ(ctx_, nullptr); + CHECK(!ctx_); const EVP_CIPHER* const cipher = EVP_get_cipherbyname(cipher_type); - if (cipher == nullptr) { + if (cipher == nullptr) return env()->ThrowError("Unknown cipher"); - } unsigned char key[EVP_MAX_KEY_LENGTH]; unsigned char iv[EVP_MAX_IV_LENGTH]; @@ -2647,11 +2616,11 @@ void CipherBase::Init(const char* cipher_type, key, iv); - ctx_ = EVP_CIPHER_CTX_new(); + ctx_.reset(EVP_CIPHER_CTX_new()); const bool encrypt = (kind_ == kCipher); - EVP_CipherInit_ex(ctx_, cipher, nullptr, nullptr, nullptr, encrypt); + EVP_CipherInit_ex(ctx_.get(), cipher, nullptr, nullptr, nullptr, encrypt); - int mode = EVP_CIPHER_CTX_mode(ctx_); + int mode = EVP_CIPHER_CTX_mode(ctx_.get()); if (encrypt && (mode == EVP_CIPH_CTR_MODE || mode == EVP_CIPH_GCM_MODE || mode == EVP_CIPH_CCM_MODE)) { // Ignore the return value (i.e. 
possible exception) because we are @@ -2662,7 +2631,7 @@ void CipherBase::Init(const char* cipher_type, } if (mode == EVP_CIPH_WRAP_MODE) - EVP_CIPHER_CTX_set_flags(ctx_, EVP_CIPHER_CTX_FLAG_WRAP_ALLOW); + EVP_CIPHER_CTX_set_flags(ctx_.get(), EVP_CIPHER_CTX_FLAG_WRAP_ALLOW); if (IsAuthenticatedMode()) { if (!InitAuthenticated(cipher_type, EVP_CIPHER_iv_length(cipher), @@ -2670,9 +2639,9 @@ void CipherBase::Init(const char* cipher_type, return; } - CHECK_EQ(1, EVP_CIPHER_CTX_set_key_length(ctx_, key_len)); + CHECK_EQ(1, EVP_CIPHER_CTX_set_key_length(ctx_.get(), key_len)); - EVP_CipherInit_ex(ctx_, + EVP_CipherInit_ex(ctx_.get(), nullptr, nullptr, reinterpret_cast(key), @@ -2730,13 +2699,13 @@ void CipherBase::InitIv(const char* cipher_type, return env()->ThrowError("Invalid IV length"); } - ctx_ = EVP_CIPHER_CTX_new(); + ctx_.reset(EVP_CIPHER_CTX_new()); if (mode == EVP_CIPH_WRAP_MODE) - EVP_CIPHER_CTX_set_flags(ctx_, EVP_CIPHER_CTX_FLAG_WRAP_ALLOW); + EVP_CIPHER_CTX_set_flags(ctx_.get(), EVP_CIPHER_CTX_FLAG_WRAP_ALLOW); const bool encrypt = (kind_ == kCipher); - EVP_CipherInit_ex(ctx_, cipher, nullptr, nullptr, nullptr, encrypt); + EVP_CipherInit_ex(ctx_.get(), cipher, nullptr, nullptr, nullptr, encrypt); if (IsAuthenticatedMode()) { CHECK(has_iv); @@ -2744,13 +2713,12 @@ void CipherBase::InitIv(const char* cipher_type, return; } - if (!EVP_CIPHER_CTX_set_key_length(ctx_, key_len)) { - EVP_CIPHER_CTX_free(ctx_); - ctx_ = nullptr; + if (!EVP_CIPHER_CTX_set_key_length(ctx_.get(), key_len)) { + ctx_.reset(); return env()->ThrowError("Invalid key length"); } - EVP_CipherInit_ex(ctx_, + EVP_CipherInit_ex(ctx_.get(), nullptr, nullptr, reinterpret_cast(key), @@ -2791,12 +2759,15 @@ bool CipherBase::InitAuthenticated(const char *cipher_type, int iv_len, int auth_tag_len) { CHECK(IsAuthenticatedMode()); - if (!EVP_CIPHER_CTX_ctrl(ctx_, EVP_CTRL_AEAD_SET_IVLEN, iv_len, nullptr)) { + if (!EVP_CIPHER_CTX_ctrl(ctx_.get(), + EVP_CTRL_AEAD_SET_IVLEN, + iv_len, + nullptr)) { env()->ThrowError("Invalid IV length"); return false; } - if (EVP_CIPHER_CTX_mode(ctx_) == EVP_CIPH_CCM_MODE) { + if (EVP_CIPHER_CTX_mode(ctx_.get()) == EVP_CIPH_CCM_MODE) { if (auth_tag_len < 0) { char msg[128]; snprintf(msg, sizeof(msg), "authTagLength required for %s", cipher_type); @@ -2812,8 +2783,8 @@ bool CipherBase::InitAuthenticated(const char *cipher_type, int iv_len, } #endif - if (!EVP_CIPHER_CTX_ctrl(ctx_, EVP_CTRL_CCM_SET_TAG, auth_tag_len, - nullptr)) { + if (!EVP_CIPHER_CTX_ctrl(ctx_.get(), EVP_CTRL_CCM_SET_TAG, auth_tag_len, + nullptr)) { env()->ThrowError("Invalid authentication tag length"); return false; } @@ -2836,8 +2807,8 @@ bool CipherBase::InitAuthenticated(const char *cipher_type, int iv_len, bool CipherBase::CheckCCMMessageLength(int message_len) { - CHECK_NE(ctx_, nullptr); - CHECK(EVP_CIPHER_CTX_mode(ctx_) == EVP_CIPH_CCM_MODE); + CHECK(ctx_); + CHECK(EVP_CIPHER_CTX_mode(ctx_.get()) == EVP_CIPH_CCM_MODE); if (message_len > max_message_size_) { env()->ThrowError("Message exceeds maximum size"); @@ -2850,8 +2821,8 @@ bool CipherBase::CheckCCMMessageLength(int message_len) { bool CipherBase::IsAuthenticatedMode() const { // Check if this cipher operates in an AEAD mode that we support. 
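CipherBase::Init()/InitIv() above now allocate the context with `ctx_.reset(EVP_CIPHER_CTX_new())` and clear it again with `ctx_.reset()` on the error path. Outside of the class plumbing, the same ownership style looks roughly like the following CBC encryption helper; this is an illustrative sketch, not code from the patch:

```c++
// Sketch: an AES-256-CBC encryption helper where the cipher context is
// released by the unique_ptr deleter on every exit path.
#include <memory>
#include <vector>
#include <openssl/evp.h>

using CipherCtxPointer =
    std::unique_ptr<EVP_CIPHER_CTX, decltype(&EVP_CIPHER_CTX_free)>;

bool EncryptAes256Cbc(const unsigned char* key, const unsigned char* iv,
                      const std::vector<unsigned char>& in,
                      std::vector<unsigned char>* out) {
  CipherCtxPointer ctx(EVP_CIPHER_CTX_new(), EVP_CIPHER_CTX_free);
  if (!ctx ||
      EVP_EncryptInit_ex(ctx.get(), EVP_aes_256_cbc(), nullptr, key, iv) != 1)
    return false;

  out->resize(in.size() + EVP_CIPHER_CTX_block_size(ctx.get()));
  int len = 0;
  if (EVP_EncryptUpdate(ctx.get(), out->data(), &len,
                        in.data(), static_cast<int>(in.size())) != 1)
    return false;
  int total = len;
  if (EVP_EncryptFinal_ex(ctx.get(), out->data() + total, &len) != 1)
    return false;
  out->resize(total + len);
  return true;
}
```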
- CHECK_NE(ctx_, nullptr); - const int mode = EVP_CIPHER_CTX_mode(ctx_); + CHECK(ctx_); + const int mode = EVP_CIPHER_CTX_mode(ctx_.get()); return mode == EVP_CIPH_GCM_MODE || mode == EVP_CIPH_CCM_MODE; } @@ -2862,7 +2833,7 @@ void CipherBase::GetAuthTag(const FunctionCallbackInfo& args) { ASSIGN_OR_RETURN_UNWRAP(&cipher, args.Holder()); // Only callable after Final and if encrypting. - if (cipher->ctx_ != nullptr || + if (cipher->ctx_ || cipher->kind_ != kCipher || cipher->auth_tag_len_ == 0) { return args.GetReturnValue().SetUndefined(); @@ -2879,7 +2850,7 @@ void CipherBase::SetAuthTag(const FunctionCallbackInfo& args) { CipherBase* cipher; ASSIGN_OR_RETURN_UNWRAP(&cipher, args.Holder()); - if (cipher->ctx_ == nullptr || + if (!cipher->ctx_ || !cipher->IsAuthenticatedMode() || cipher->kind_ != kDecipher) { return args.GetReturnValue().Set(false); @@ -2887,7 +2858,7 @@ void CipherBase::SetAuthTag(const FunctionCallbackInfo& args) { // Restrict GCM tag lengths according to NIST 800-38d, page 9. unsigned int tag_len = Buffer::Length(args[0]); - const int mode = EVP_CIPHER_CTX_mode(cipher->ctx_); + const int mode = EVP_CIPHER_CTX_mode(cipher->ctx_.get()); if (mode == EVP_CIPH_GCM_MODE) { if (tag_len > 16 || (tag_len < 12 && tag_len != 8 && tag_len != 4)) { char msg[125]; @@ -2909,11 +2880,11 @@ void CipherBase::SetAuthTag(const FunctionCallbackInfo& args) { bool CipherBase::SetAAD(const char* data, unsigned int len, int plaintext_len) { - if (ctx_ == nullptr || !IsAuthenticatedMode()) + if (!ctx_ || !IsAuthenticatedMode()) return false; int outlen; - const int mode = EVP_CIPHER_CTX_mode(ctx_); + const int mode = EVP_CIPHER_CTX_mode(ctx_.get()); // When in CCM mode, we need to set the authentication tag and the plaintext // length in advance. @@ -2927,7 +2898,7 @@ bool CipherBase::SetAAD(const char* data, unsigned int len, int plaintext_len) { return false; if (kind_ == kDecipher && !auth_tag_set_ && auth_tag_len_ > 0) { - if (!EVP_CIPHER_CTX_ctrl(ctx_, + if (!EVP_CIPHER_CTX_ctrl(ctx_.get(), EVP_CTRL_CCM_SET_TAG, auth_tag_len_, reinterpret_cast(auth_tag_))) { @@ -2937,11 +2908,11 @@ bool CipherBase::SetAAD(const char* data, unsigned int len, int plaintext_len) { } // Specify the plaintext length. - if (!EVP_CipherUpdate(ctx_, nullptr, &outlen, nullptr, plaintext_len)) + if (!EVP_CipherUpdate(ctx_.get(), nullptr, &outlen, nullptr, plaintext_len)) return false; } - return 1 == EVP_CipherUpdate(ctx_, + return 1 == EVP_CipherUpdate(ctx_.get(), nullptr, &outlen, reinterpret_cast(data), @@ -2967,10 +2938,10 @@ CipherBase::UpdateResult CipherBase::Update(const char* data, int len, unsigned char** out, int* out_len) { - if (ctx_ == nullptr) + if (!ctx_) return kErrorState; - const int mode = EVP_CIPHER_CTX_mode(ctx_); + const int mode = EVP_CIPHER_CTX_mode(ctx_.get()); if (mode == EVP_CIPH_CCM_MODE) { if (!CheckCCMMessageLength(len)) @@ -2980,21 +2951,35 @@ CipherBase::UpdateResult CipherBase::Update(const char* data, // on first update: if (kind_ == kDecipher && IsAuthenticatedMode() && auth_tag_len_ > 0 && !auth_tag_set_) { - EVP_CIPHER_CTX_ctrl(ctx_, + EVP_CIPHER_CTX_ctrl(ctx_.get(), EVP_CTRL_GCM_SET_TAG, auth_tag_len_, reinterpret_cast(auth_tag_)); auth_tag_set_ = true; } - *out_len = len + EVP_CIPHER_CTX_block_size(ctx_); - *out = Malloc(static_cast(*out_len)); - int r = EVP_CipherUpdate(ctx_, + *out_len = 0; + int buff_len = len + EVP_CIPHER_CTX_block_size(ctx_.get()); + // For key wrapping algorithms, get output size by calling + // EVP_CipherUpdate() with null output. 
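SetAuthTag()/SetAAD() above feed the authentication tag into the context through `EVP_CIPHER_CTX_ctrl()`, and Final() later reports whether it verified. For reference, a hedged standalone sketch of that install-then-verify sequence for GCM, independent of the CipherBase state machine:

```c++
// Sketch: AES-128-GCM decryption. The expected tag is installed via
// EVP_CIPHER_CTX_ctrl(); EVP_DecryptFinal_ex() returns 1 only if the
// tag verified. `out` must hold at least ct_len bytes.
#include <memory>
#include <openssl/evp.h>

using CipherCtxPointer =
    std::unique_ptr<EVP_CIPHER_CTX, decltype(&EVP_CIPHER_CTX_free)>;

bool GcmDecrypt(const unsigned char key[16], const unsigned char iv[12],
                const unsigned char* ct, int ct_len,
                unsigned char* tag, int tag_len,
                unsigned char* out, int* out_len) {
  CipherCtxPointer ctx(EVP_CIPHER_CTX_new(), EVP_CIPHER_CTX_free);
  if (!ctx ||
      EVP_DecryptInit_ex(ctx.get(), EVP_aes_128_gcm(), nullptr, key, iv) != 1)
    return false;
  int len = 0;
  if (EVP_DecryptUpdate(ctx.get(), out, &len, ct, ct_len) != 1)
    return false;
  int total = len;
  if (EVP_CIPHER_CTX_ctrl(ctx.get(), EVP_CTRL_GCM_SET_TAG, tag_len, tag) != 1)
    return false;
  if (EVP_DecryptFinal_ex(ctx.get(), out + total, &len) != 1)
    return false;  // authentication tag did not verify
  *out_len = total + len;
  return true;
}
```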
+ if (mode == EVP_CIPH_WRAP_MODE && + EVP_CipherUpdate(ctx_.get(), + nullptr, + &buff_len, + reinterpret_cast(data), + len) != 1) { + return kErrorState; + } + + *out = Malloc(buff_len); + int r = EVP_CipherUpdate(ctx_.get(), *out, out_len, reinterpret_cast(data), len); + CHECK_LE(*out_len, buff_len); + // When in CCM mode, EVP_CipherUpdate will fail if the authentication tag is // invalid. In that case, remember the error and throw in final(). if (!r && kind_ == kDecipher && mode == EVP_CIPH_CCM_MODE) { @@ -3045,9 +3030,9 @@ void CipherBase::Update(const FunctionCallbackInfo& args) { bool CipherBase::SetAutoPadding(bool auto_padding) { - if (ctx_ == nullptr) + if (!ctx_) return false; - return EVP_CIPHER_CTX_set_padding(ctx_, auto_padding); + return EVP_CIPHER_CTX_set_padding(ctx_.get(), auto_padding); } @@ -3061,13 +3046,13 @@ void CipherBase::SetAutoPadding(const FunctionCallbackInfo& args) { bool CipherBase::Final(unsigned char** out, int *out_len) { - if (ctx_ == nullptr) + if (!ctx_) return false; - const int mode = EVP_CIPHER_CTX_mode(ctx_); + const int mode = EVP_CIPHER_CTX_mode(ctx_.get()); *out = Malloc( - static_cast(EVP_CIPHER_CTX_block_size(ctx_))); + static_cast(EVP_CIPHER_CTX_block_size(ctx_.get()))); // In CCM mode, final() only checks whether authentication failed in update(). // EVP_CipherFinal_ex must not be called and will fail. @@ -3075,21 +3060,20 @@ bool CipherBase::Final(unsigned char** out, int *out_len) { if (kind_ == kDecipher && mode == EVP_CIPH_CCM_MODE) { ok = !pending_auth_failed_; } else { - ok = EVP_CipherFinal_ex(ctx_, *out, out_len) == 1; + ok = EVP_CipherFinal_ex(ctx_.get(), *out, out_len) == 1; if (ok && kind_ == kCipher && IsAuthenticatedMode()) { // For GCM, the tag length is static (16 bytes), while the CCM tag length // must be specified in advance. 
if (mode == EVP_CIPH_GCM_MODE) auth_tag_len_ = sizeof(auth_tag_); - CHECK_EQ(1, EVP_CIPHER_CTX_ctrl(ctx_, EVP_CTRL_AEAD_GET_TAG, + CHECK_EQ(1, EVP_CIPHER_CTX_ctrl(ctx_.get(), EVP_CTRL_AEAD_GET_TAG, auth_tag_len_, reinterpret_cast(auth_tag_))); } } - EVP_CIPHER_CTX_free(ctx_); - ctx_ = nullptr; + ctx_.reset(); return ok; } @@ -3132,11 +3116,6 @@ void CipherBase::Final(const FunctionCallbackInfo& args) { } -Hmac::~Hmac() { - HMAC_CTX_free(ctx_); -} - - void Hmac::Initialize(Environment* env, v8::Local target) { Local t = env->NewFunctionTemplate(New); @@ -3166,11 +3145,9 @@ void Hmac::HmacInit(const char* hash_type, const char* key, int key_len) { if (key_len == 0) { key = ""; } - ctx_ = HMAC_CTX_new(); - if (ctx_ == nullptr || - !HMAC_Init_ex(ctx_, key, key_len, md, nullptr)) { - HMAC_CTX_free(ctx_); - ctx_ = nullptr; + ctx_.reset(HMAC_CTX_new()); + if (!ctx_ || !HMAC_Init_ex(ctx_.get(), key, key_len, md, nullptr)) { + ctx_.reset(); return ThrowCryptoError(env(), ERR_get_error()); } } @@ -3189,9 +3166,11 @@ void Hmac::HmacInit(const FunctionCallbackInfo& args) { bool Hmac::HmacUpdate(const char* data, int len) { - if (ctx_ == nullptr) + if (!ctx_) return false; - int r = HMAC_Update(ctx_, reinterpret_cast(data), len); + int r = HMAC_Update(ctx_.get(), + reinterpret_cast(data), + len); return r == 1; } @@ -3236,10 +3215,9 @@ void Hmac::HmacDigest(const FunctionCallbackInfo& args) { unsigned char md_value[EVP_MAX_MD_SIZE]; unsigned int md_len = 0; - if (hmac->ctx_ != nullptr) { - HMAC_Final(hmac->ctx_, md_value, &md_len); - HMAC_CTX_free(hmac->ctx_); - hmac->ctx_ = nullptr; + if (hmac->ctx_) { + HMAC_Final(hmac->ctx_.get(), md_value, &md_len); + hmac->ctx_.reset(); } Local error; @@ -3258,11 +3236,6 @@ void Hmac::HmacDigest(const FunctionCallbackInfo& args) { } -Hash::~Hash() { - EVP_MD_CTX_free(mdctx_); -} - - void Hash::Initialize(Environment* env, v8::Local target) { Local t = env->NewFunctionTemplate(New); @@ -3292,11 +3265,9 @@ bool Hash::HashInit(const char* hash_type) { const EVP_MD* md = EVP_get_digestbyname(hash_type); if (md == nullptr) return false; - mdctx_ = EVP_MD_CTX_new(); - if (mdctx_ == nullptr || - EVP_DigestInit_ex(mdctx_, md, nullptr) <= 0) { - EVP_MD_CTX_free(mdctx_); - mdctx_ = nullptr; + mdctx_.reset(EVP_MD_CTX_new()); + if (!mdctx_ || EVP_DigestInit_ex(mdctx_.get(), md, nullptr) <= 0) { + mdctx_.reset(); return false; } finalized_ = false; @@ -3305,9 +3276,9 @@ bool Hash::HashInit(const char* hash_type) { bool Hash::HashUpdate(const char* data, int len) { - if (mdctx_ == nullptr) + if (!mdctx_) return false; - EVP_DigestUpdate(mdctx_, data, len); + EVP_DigestUpdate(mdctx_.get(), data, len); return true; } @@ -3351,7 +3322,7 @@ void Hash::HashDigest(const FunctionCallbackInfo& args) { unsigned char md_value[EVP_MAX_MD_SIZE]; unsigned int md_len; - EVP_DigestFinal_ex(hash->mdctx_, md_value, &md_len); + EVP_DigestFinal_ex(hash->mdctx_.get(), md_value, &md_len); hash->finalized_ = true; Local error; @@ -3370,11 +3341,6 @@ void Hash::HashDigest(const FunctionCallbackInfo& args) { } -SignBase::~SignBase() { - EVP_MD_CTX_free(mdctx_); -} - - SignBase::Error SignBase::Init(const char* sign_type) { CHECK_EQ(mdctx_, nullptr); // Historically, "dss1" and "DSS1" were DSA aliases for SHA-1 @@ -3387,11 +3353,9 @@ SignBase::Error SignBase::Init(const char* sign_type) { if (md == nullptr) return kSignUnknownDigest; - mdctx_ = EVP_MD_CTX_new(); - if (mdctx_ == nullptr || - !EVP_DigestInit_ex(mdctx_, md, nullptr)) { - EVP_MD_CTX_free(mdctx_); - mdctx_ = nullptr; + 
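Hmac::HmacInit() and Hash::HashInit() above reset smart-pointer members instead of pairing `HMAC_CTX_new()`/`EVP_MD_CTX_new()` with explicit frees, which is also why the `~Hmac()`/`~Hash()` destructors disappear. As a point of comparison, a one-shot HMAC-SHA256 helper in the same ownership style (a sketch, not the patch's code):

```c++
// Sketch: HMAC-SHA256 with HMAC_CTX_free() attached as the deleter, so
// every early return releases the context automatically.
#include <memory>
#include <string>
#include <openssl/evp.h>
#include <openssl/hmac.h>

using HMACCtxPointer = std::unique_ptr<HMAC_CTX, decltype(&HMAC_CTX_free)>;

bool HmacSha256(const std::string& key, const std::string& data,
                unsigned char out[EVP_MAX_MD_SIZE], unsigned int* out_len) {
  HMACCtxPointer ctx(HMAC_CTX_new(), HMAC_CTX_free);
  return ctx &&
         HMAC_Init_ex(ctx.get(), key.data(), static_cast<int>(key.size()),
                      EVP_sha256(), nullptr) == 1 &&
         HMAC_Update(ctx.get(),
                     reinterpret_cast<const unsigned char*>(data.data()),
                     data.size()) == 1 &&
         HMAC_Final(ctx.get(), out, out_len) == 1;
}
```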
mdctx_.reset(EVP_MD_CTX_new()); + if (!mdctx_ || !EVP_DigestInit_ex(mdctx_.get(), md, nullptr)) { + mdctx_.reset(); return kSignInit; } @@ -3402,7 +3366,7 @@ SignBase::Error SignBase::Init(const char* sign_type) { SignBase::Error SignBase::Update(const char* data, int len) { if (mdctx_ == nullptr) return kSignNotInitialised; - if (!EVP_DigestUpdate(mdctx_, data, len)) + if (!EVP_DigestUpdate(mdctx_.get(), data, len)) return kSignUpdate; return kSignOk; } @@ -3445,10 +3409,12 @@ void SignBase::CheckThrow(SignBase::Error error) { } } -static bool ApplyRSAOptions(EVP_PKEY* pkey, EVP_PKEY_CTX* pkctx, int padding, +static bool ApplyRSAOptions(const EVPKeyPointer& pkey, + EVP_PKEY_CTX* pkctx, + int padding, int salt_len) { - if (EVP_PKEY_id(pkey) == EVP_PKEY_RSA || - EVP_PKEY_id(pkey) == EVP_PKEY_RSA2) { + if (EVP_PKEY_id(pkey.get()) == EVP_PKEY_RSA || + EVP_PKEY_id(pkey.get()) == EVP_PKEY_RSA2) { if (EVP_PKEY_CTX_set_rsa_padding(pkctx, padding) <= 0) return false; if (padding == RSA_PKCS1_PSS_PADDING) { @@ -3502,35 +3468,29 @@ void Sign::SignUpdate(const FunctionCallbackInfo& args) { sign->CheckThrow(err); } -static int Node_SignFinal(EVP_MD_CTX* mdctx, unsigned char* md, - unsigned int* sig_len, EVP_PKEY* pkey, int padding, +static int Node_SignFinal(EVPMDPointer&& mdctx, unsigned char* md, + unsigned int* sig_len, + const EVPKeyPointer& pkey, int padding, int pss_salt_len) { unsigned char m[EVP_MAX_MD_SIZE]; unsigned int m_len; - int rv = 0; - EVP_PKEY_CTX* pkctx = nullptr; *sig_len = 0; - if (!EVP_DigestFinal_ex(mdctx, m, &m_len)) - return rv; - - size_t sltmp = static_cast(EVP_PKEY_size(pkey)); - pkctx = EVP_PKEY_CTX_new(pkey, nullptr); - if (pkctx == nullptr) - goto err; - if (EVP_PKEY_sign_init(pkctx) <= 0) - goto err; - if (!ApplyRSAOptions(pkey, pkctx, padding, pss_salt_len)) - goto err; - if (EVP_PKEY_CTX_set_signature_md(pkctx, EVP_MD_CTX_md(mdctx)) <= 0) - goto err; - if (EVP_PKEY_sign(pkctx, md, &sltmp, m, m_len) <= 0) - goto err; - *sig_len = sltmp; - rv = 1; - err: - EVP_PKEY_CTX_free(pkctx); - return rv; + if (!EVP_DigestFinal_ex(mdctx.get(), m, &m_len)) + return 0; + + size_t sltmp = static_cast(EVP_PKEY_size(pkey.get())); + EVPKeyCtxPointer pkctx(EVP_PKEY_CTX_new(pkey.get(), nullptr)); + if (pkctx && + EVP_PKEY_sign_init(pkctx.get()) > 0 && + ApplyRSAOptions(pkey, pkctx.get(), padding, pss_salt_len) && + EVP_PKEY_CTX_set_signature_md(pkctx.get(), + EVP_MD_CTX_md(mdctx.get())) > 0 && + EVP_PKEY_sign(pkctx.get(), md, &sltmp, m, m_len) > 0) { + *sig_len = sltmp; + return 1; + } + return 0; } SignBase::Error Sign::SignFinal(const char* key_pem, @@ -3543,24 +3503,22 @@ SignBase::Error Sign::SignFinal(const char* key_pem, if (!mdctx_) return kSignNotInitialised; - BIO* bp = nullptr; - EVP_PKEY* pkey = nullptr; - bool fatal = true; + EVPMDPointer mdctx = std::move(mdctx_); - bp = BIO_new_mem_buf(const_cast(key_pem), key_pem_len); - if (bp == nullptr) - goto exit; + BIOPointer bp(BIO_new_mem_buf(const_cast(key_pem), key_pem_len)); + if (!bp) + return kSignPrivateKey; - pkey = PEM_read_bio_PrivateKey(bp, - nullptr, - PasswordCallback, - const_cast(passphrase)); + EVPKeyPointer pkey(PEM_read_bio_PrivateKey(bp.get(), + nullptr, + PasswordCallback, + const_cast(passphrase))); // Errors might be injected into OpenSSL's error stack // without `pkey` being set to nullptr; // cf. the test of `test_bad_rsa_privkey.pem` for an example. 
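The reworked Node_SignFinal() above drops the `goto err` cleanup ladder in favour of one chained condition over RAII handles, so every failure path unwinds through the same destructors. The same transformation in stripped-down form; the function name and signature here are illustrative only:

```c++
// Sketch of the control-flow change: instead of "goto err" plus a manual
// EVP_PKEY_CTX_free(), chain the calls with && and let the unique_ptr
// clean up on success and failure alike.
#include <memory>
#include <openssl/evp.h>

using EVPKeyCtxPointer =
    std::unique_ptr<EVP_PKEY_CTX, decltype(&EVP_PKEY_CTX_free)>;

bool SignDigest(EVP_PKEY* pkey, const EVP_MD* md,
                const unsigned char* digest, size_t digest_len,
                unsigned char* sig, size_t* sig_len) {
  EVPKeyCtxPointer pkctx(EVP_PKEY_CTX_new(pkey, nullptr), EVP_PKEY_CTX_free);
  return pkctx &&
         EVP_PKEY_sign_init(pkctx.get()) > 0 &&
         EVP_PKEY_CTX_set_signature_md(pkctx.get(), md) > 0 &&
         EVP_PKEY_sign(pkctx.get(), sig, sig_len, digest, digest_len) > 0;
}
```

The early-return style only becomes safe once every temporary is owned by such a handle, which is what the surrounding hunks set up.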
- if (pkey == nullptr || 0 != ERR_peek_error()) - goto exit; + if (!pkey || 0 != ERR_peek_error()) + return kSignPrivateKey; #ifdef NODE_FIPS_MODE /* Validate DSA2 parameters from FIPS 186-4 */ @@ -3579,28 +3537,15 @@ SignBase::Error Sign::SignFinal(const char* key_pem, result = true; if (!result) { - fatal = true; - goto exit; + return kSignPrivateKey; } } #endif // NODE_FIPS_MODE - if (Node_SignFinal(mdctx_, sig, sig_len, pkey, padding, salt_len)) - fatal = false; - - exit: - if (pkey != nullptr) - EVP_PKEY_free(pkey); - if (bp != nullptr) - BIO_free_all(bp); - - EVP_MD_CTX_free(mdctx_); - mdctx_ = nullptr; - - if (fatal) + if (Node_SignFinal(std::move(mdctx), sig, sig_len, pkey, padding, salt_len)) + return kSignOk; + else return kSignPrivateKey; - - return kSignOk; } @@ -3701,87 +3646,60 @@ SignBase::Error Verify::VerifyFinal(const char* key_pem, if (!mdctx_) return kSignNotInitialised; - EVP_PKEY* pkey = nullptr; - BIO* bp = nullptr; - X509* x509 = nullptr; - bool fatal = true; + EVPKeyPointer pkey; unsigned char m[EVP_MAX_MD_SIZE]; unsigned int m_len; int r = 0; - EVP_PKEY_CTX* pkctx = nullptr; + *verify_result = false; + EVPMDPointer mdctx = std::move(mdctx_); - bp = BIO_new_mem_buf(const_cast(key_pem), key_pem_len); - if (bp == nullptr) - goto exit; + BIOPointer bp(BIO_new_mem_buf(const_cast(key_pem), key_pem_len)); + if (!bp) + return kSignPublicKey; // Check if this is a PKCS#8 or RSA public key before trying as X.509. // Split this out into a separate function once we have more than one // consumer of public keys. if (strncmp(key_pem, PUBLIC_KEY_PFX, PUBLIC_KEY_PFX_LEN) == 0) { - pkey = PEM_read_bio_PUBKEY(bp, nullptr, NoPasswordCallback, nullptr); - if (pkey == nullptr) - goto exit; + pkey.reset( + PEM_read_bio_PUBKEY(bp.get(), nullptr, NoPasswordCallback, nullptr)); } else if (strncmp(key_pem, PUBRSA_KEY_PFX, PUBRSA_KEY_PFX_LEN) == 0) { - RSA* rsa = - PEM_read_bio_RSAPublicKey(bp, nullptr, PasswordCallback, nullptr); + RSAPointer rsa(PEM_read_bio_RSAPublicKey( + bp.get(), nullptr, PasswordCallback, nullptr)); if (rsa) { - pkey = EVP_PKEY_new(); + pkey.reset(EVP_PKEY_new()); if (pkey) - EVP_PKEY_set1_RSA(pkey, rsa); - RSA_free(rsa); + EVP_PKEY_set1_RSA(pkey.get(), rsa.get()); } - if (pkey == nullptr) - goto exit; } else { // X.509 fallback - x509 = PEM_read_bio_X509(bp, nullptr, NoPasswordCallback, nullptr); - if (x509 == nullptr) - goto exit; - - pkey = X509_get_pubkey(x509); - if (pkey == nullptr) - goto exit; - } - - if (!EVP_DigestFinal_ex(mdctx_, m, &m_len)) { - goto exit; - } - - fatal = false; - - pkctx = EVP_PKEY_CTX_new(pkey, nullptr); - if (pkctx == nullptr) - goto err; - if (EVP_PKEY_verify_init(pkctx) <= 0) - goto err; - if (!ApplyRSAOptions(pkey, pkctx, padding, saltlen)) - goto err; - if (EVP_PKEY_CTX_set_signature_md(pkctx, EVP_MD_CTX_md(mdctx_)) <= 0) - goto err; - r = EVP_PKEY_verify(pkctx, - reinterpret_cast(sig), - siglen, - m, - m_len); - - err: - EVP_PKEY_CTX_free(pkctx); - - exit: - if (pkey != nullptr) - EVP_PKEY_free(pkey); - if (bp != nullptr) - BIO_free_all(bp); - if (x509 != nullptr) - X509_free(x509); + X509Pointer x509(PEM_read_bio_X509( + bp.get(), nullptr, NoPasswordCallback, nullptr)); + if (!x509) + return kSignPublicKey; - EVP_MD_CTX_free(mdctx_); - mdctx_ = nullptr; + pkey.reset(X509_get_pubkey(x509.get())); + } + if (!pkey) + return kSignPublicKey; - if (fatal) + if (!EVP_DigestFinal_ex(mdctx.get(), m, &m_len)) return kSignPublicKey; - *verify_result = r == 1; + EVPKeyCtxPointer pkctx(EVP_PKEY_CTX_new(pkey.get(), nullptr)); + if (pkctx && + 
EVP_PKEY_verify_init(pkctx.get()) > 0 && + ApplyRSAOptions(pkey, pkctx.get(), padding, saltlen) && + EVP_PKEY_CTX_set_signature_md(pkctx.get(), + EVP_MD_CTX_md(mdctx.get())) > 0) { + r = EVP_PKEY_verify(pkctx.get(), + reinterpret_cast(sig), + siglen, + m, + m_len); + *verify_result = r == 1; + } + return kSignOk; } @@ -3830,81 +3748,60 @@ bool PublicKeyCipher::Cipher(const char* key_pem, int len, unsigned char** out, size_t* out_len) { - EVP_PKEY* pkey = nullptr; - EVP_PKEY_CTX* ctx = nullptr; - BIO* bp = nullptr; - X509* x509 = nullptr; - bool fatal = true; + EVPKeyPointer pkey; - bp = BIO_new_mem_buf(const_cast(key_pem), key_pem_len); - if (bp == nullptr) - goto exit; + BIOPointer bp(BIO_new_mem_buf(const_cast(key_pem), key_pem_len)); + if (!bp) + return false; // Check if this is a PKCS#8 or RSA public key before trying as X.509 and // private key. if (operation == kPublic && strncmp(key_pem, PUBLIC_KEY_PFX, PUBLIC_KEY_PFX_LEN) == 0) { - pkey = PEM_read_bio_PUBKEY(bp, nullptr, nullptr, nullptr); - if (pkey == nullptr) - goto exit; + pkey.reset(PEM_read_bio_PUBKEY(bp.get(), nullptr, nullptr, nullptr)); } else if (operation == kPublic && strncmp(key_pem, PUBRSA_KEY_PFX, PUBRSA_KEY_PFX_LEN) == 0) { - RSA* rsa = PEM_read_bio_RSAPublicKey(bp, nullptr, nullptr, nullptr); + RSAPointer rsa( + PEM_read_bio_RSAPublicKey(bp.get(), nullptr, nullptr, nullptr)); if (rsa) { - pkey = EVP_PKEY_new(); + pkey.reset(EVP_PKEY_new()); if (pkey) - EVP_PKEY_set1_RSA(pkey, rsa); - RSA_free(rsa); + EVP_PKEY_set1_RSA(pkey.get(), rsa.get()); } - if (pkey == nullptr) - goto exit; } else if (operation == kPublic && strncmp(key_pem, CERTIFICATE_PFX, CERTIFICATE_PFX_LEN) == 0) { - x509 = PEM_read_bio_X509(bp, nullptr, NoPasswordCallback, nullptr); - if (x509 == nullptr) - goto exit; + X509Pointer x509( + PEM_read_bio_X509(bp.get(), nullptr, NoPasswordCallback, nullptr)); + if (!x509) + return false; - pkey = X509_get_pubkey(x509); - if (pkey == nullptr) - goto exit; + pkey.reset(X509_get_pubkey(x509.get())); } else { - pkey = PEM_read_bio_PrivateKey(bp, - nullptr, - PasswordCallback, - const_cast(passphrase)); - if (pkey == nullptr) - goto exit; + pkey.reset(PEM_read_bio_PrivateKey(bp.get(), + nullptr, + PasswordCallback, + const_cast(passphrase))); } + if (!pkey) + return false; - ctx = EVP_PKEY_CTX_new(pkey, nullptr); + EVPKeyCtxPointer ctx(EVP_PKEY_CTX_new(pkey.get(), nullptr)); if (!ctx) - goto exit; - if (EVP_PKEY_cipher_init(ctx) <= 0) - goto exit; - if (EVP_PKEY_CTX_set_rsa_padding(ctx, padding) <= 0) - goto exit; + return false; + if (EVP_PKEY_cipher_init(ctx.get()) <= 0) + return false; + if (EVP_PKEY_CTX_set_rsa_padding(ctx.get(), padding) <= 0) + return false; - if (EVP_PKEY_cipher(ctx, nullptr, out_len, data, len) <= 0) - goto exit; + if (EVP_PKEY_cipher(ctx.get(), nullptr, out_len, data, len) <= 0) + return false; *out = Malloc(*out_len); - if (EVP_PKEY_cipher(ctx, *out, out_len, data, len) <= 0) - goto exit; - - fatal = false; - - exit: - if (x509 != nullptr) - X509_free(x509); - if (pkey != nullptr) - EVP_PKEY_free(pkey); - if (bp != nullptr) - BIO_free_all(bp); - if (ctx != nullptr) - EVP_PKEY_CTX_free(ctx); + if (EVP_PKEY_cipher(ctx.get(), *out, out_len, data, len) <= 0) + return false; - return !fatal; + return true; } @@ -4018,8 +3915,8 @@ void DiffieHellman::Initialize(Environment* env, Local target) { bool DiffieHellman::Init(int primeLength, int g) { - dh = DH_new(); - if (!DH_generate_parameters_ex(dh, primeLength, g, 0)) + dh_.reset(DH_new()); + if (!DH_generate_parameters_ex(dh_.get(), 
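Verify::VerifyFinal() and PublicKeyCipher::Cipher() above both read a PEM public key (PKCS#8 `PUBLIC KEY` or an X.509 certificate fallback) through `BIOPointer`/`EVPKeyPointer`/`X509Pointer` handles and simply return on failure. A condensed sketch of that loading step with the aliases written out; the PEM prefix constant is assumed, and the RSA-only branch of the patch is omitted for brevity:

```c++
// Sketch: load a PEM public key (PKCS#8 "PUBLIC KEY" or an X.509 cert)
// and return it as an RAII handle; every early return is leak-free.
#include <cstring>
#include <memory>
#include <openssl/bio.h>
#include <openssl/pem.h>
#include <openssl/x509.h>

using BIOPointer = std::unique_ptr<BIO, decltype(&BIO_free_all)>;
using EVPKeyPointer = std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>;
using X509Pointer = std::unique_ptr<X509, decltype(&X509_free)>;

EVPKeyPointer ReadPublicKey(const char* pem, int pem_len) {
  EVPKeyPointer pkey(nullptr, EVP_PKEY_free);
  BIOPointer bp(BIO_new_mem_buf(const_cast<char*>(pem), pem_len),
                BIO_free_all);
  if (!bp)
    return pkey;

  static const char kPubKeyPfx[] = "-----BEGIN PUBLIC KEY-----";
  if (strncmp(pem, kPubKeyPfx, sizeof(kPubKeyPfx) - 1) == 0) {
    pkey.reset(PEM_read_bio_PUBKEY(bp.get(), nullptr, nullptr, nullptr));
  } else {
    // Fall back to extracting the key from an X.509 certificate.
    X509Pointer x509(PEM_read_bio_X509(bp.get(), nullptr, nullptr, nullptr),
                     X509_free);
    if (x509)
      pkey.reset(X509_get_pubkey(x509.get()));
  }
  return pkey;
}
```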
primeLength, g, 0)) return false; bool result = VerifyContext(); if (!result) @@ -4030,12 +3927,12 @@ bool DiffieHellman::Init(int primeLength, int g) { bool DiffieHellman::Init(const char* p, int p_len, int g) { - dh = DH_new(); + dh_.reset(DH_new()); BIGNUM* bn_p = BN_bin2bn(reinterpret_cast(p), p_len, nullptr); BIGNUM* bn_g = BN_new(); if (!BN_set_word(bn_g, g) || - !DH_set0_pqg(dh, bn_p, nullptr, bn_g)) { + !DH_set0_pqg(dh_.get(), bn_p, nullptr, bn_g)) { BN_free(bn_p); BN_free(bn_g); return false; @@ -4049,10 +3946,10 @@ bool DiffieHellman::Init(const char* p, int p_len, int g) { bool DiffieHellman::Init(const char* p, int p_len, const char* g, int g_len) { - dh = DH_new(); + dh_.reset(DH_new()); BIGNUM *bn_p = BN_bin2bn(reinterpret_cast(p), p_len, 0); BIGNUM *bn_g = BN_bin2bn(reinterpret_cast(g), g_len, 0); - if (!DH_set0_pqg(dh, bn_p, nullptr, bn_g)) { + if (!DH_set0_pqg(dh_.get(), bn_p, nullptr, bn_g)) { BN_free(bn_p); BN_free(bn_g); return false; @@ -4140,12 +4037,12 @@ void DiffieHellman::GenerateKeys(const FunctionCallbackInfo& args) { return ThrowCryptoError(env, ERR_get_error(), "Not initialized"); } - if (!DH_generate_key(diffieHellman->dh)) { + if (!DH_generate_key(diffieHellman->dh_.get())) { return ThrowCryptoError(env, ERR_get_error(), "Key generation failed"); } const BIGNUM* pub_key; - DH_get0_key(diffieHellman->dh, &pub_key, nullptr); + DH_get0_key(diffieHellman->dh_.get(), &pub_key, nullptr); size_t size = BN_num_bytes(pub_key); char* data = Malloc(size); BN_bn2bin(pub_key, reinterpret_cast(data)); @@ -4162,7 +4059,7 @@ void DiffieHellman::GetField(const FunctionCallbackInfo& args, ASSIGN_OR_RETURN_UNWRAP(&dh, args.Holder()); if (!dh->initialised_) return env->ThrowError("Not initialized"); - const BIGNUM* num = get_field(dh->dh); + const BIGNUM* num = get_field(dh->dh_.get()); if (num == nullptr) return env->ThrowError(err_if_null); size_t size = BN_num_bytes(num); @@ -4218,33 +4115,31 @@ void DiffieHellman::ComputeSecret(const FunctionCallbackInfo& args) { } ClearErrorOnReturn clear_error_on_return; - BIGNUM* key = nullptr; if (args.Length() == 0) { return THROW_ERR_MISSING_ARGS( env, "Other party's public key argument is mandatory"); - } else { - THROW_AND_RETURN_IF_NOT_BUFFER(env, args[0], "Other party's public key"); - key = BN_bin2bn( - reinterpret_cast(Buffer::Data(args[0])), - Buffer::Length(args[0]), - 0); } - int dataSize = DH_size(diffieHellman->dh); - char* data = Malloc(dataSize); + THROW_AND_RETURN_IF_NOT_BUFFER(env, args[0], "Other party's public key"); + BignumPointer key(BN_bin2bn( + reinterpret_cast(Buffer::Data(args[0])), + Buffer::Length(args[0]), + 0)); + + MallocedBuffer data(DH_size(diffieHellman->dh_.get())); - int size = DH_compute_key(reinterpret_cast(data), - key, - diffieHellman->dh); + int size = DH_compute_key(reinterpret_cast(data.data), + key.get(), + diffieHellman->dh_.get()); if (size == -1) { int checkResult; int checked; - checked = DH_check_pub_key(diffieHellman->dh, key, &checkResult); - BN_free(key); - free(data); + checked = DH_check_pub_key(diffieHellman->dh_.get(), + key.get(), + &checkResult); if (!checked) { return ThrowCryptoError(env, ERR_get_error(), "Invalid Key"); @@ -4263,21 +4158,20 @@ void DiffieHellman::ComputeSecret(const FunctionCallbackInfo& args) { UNREACHABLE(); } - BN_free(key); CHECK_GE(size, 0); // DH_size returns number of bytes in a prime number // DH_compute_key returns number of bytes in a remainder of exponent, which // may have less bytes than a prime number. 
Therefore add 0-padding to the // allocated buffer. - if (size != dataSize) { - CHECK(dataSize > size); - memmove(data + dataSize - size, data, size); - memset(data, 0, dataSize - size); + if (static_cast(size) != data.size) { + CHECK_GT(data.size, static_cast(size)); + memmove(data.data + data.size - size, data.data, size); + memset(data.data, 0, data.size - size); } - auto rc = Buffer::New(env->isolate(), data, dataSize).ToLocalChecked(); - args.GetReturnValue().Set(rc); + args.GetReturnValue().Set( + Buffer::New(env->isolate(), data.release(), data.size).ToLocalChecked()); } void DiffieHellman::SetKey(const v8::FunctionCallbackInfo& args, @@ -4304,7 +4198,7 @@ void DiffieHellman::SetKey(const v8::FunctionCallbackInfo& args, BN_bin2bn(reinterpret_cast(Buffer::Data(args[0])), Buffer::Length(args[0]), nullptr); CHECK_NE(num, nullptr); - CHECK_EQ(1, set_field(dh->dh, num)); + CHECK_EQ(1, set_field(dh->dh_.get(), num)); } @@ -4343,7 +4237,7 @@ void DiffieHellman::VerifyErrorGetter(const FunctionCallbackInfo& args) { bool DiffieHellman::VerifyContext() { int codes; - if (!DH_check(dh, &codes)) + if (!DH_check(dh_.get(), &codes)) return false; verifyError_ = codes; return true; @@ -4383,11 +4277,11 @@ void ECDH::New(const FunctionCallbackInfo& args) { return THROW_ERR_INVALID_ARG_VALUE(env, "First argument should be a valid curve name"); - EC_KEY* key = EC_KEY_new_by_curve_name(nid); - if (key == nullptr) + ECKeyPointer key(EC_KEY_new_by_curve_name(nid)); + if (!key) return env->ThrowError("Failed to create EC_KEY using curve name"); - new ECDH(env, args.This(), key); + new ECDH(env, args.This(), std::move(key)); } @@ -4397,34 +4291,31 @@ void ECDH::GenerateKeys(const FunctionCallbackInfo& args) { ECDH* ecdh; ASSIGN_OR_RETURN_UNWRAP(&ecdh, args.Holder()); - if (!EC_KEY_generate_key(ecdh->key_)) + if (!EC_KEY_generate_key(ecdh->key_.get())) return env->ThrowError("Failed to generate EC_KEY"); } -EC_POINT* ECDH::BufferToPoint(Environment* env, - const EC_GROUP* group, - char* data, - size_t len) { - EC_POINT* pub; +ECPointPointer ECDH::BufferToPoint(Environment* env, + const EC_GROUP* group, + char* data, + size_t len) { int r; - pub = EC_POINT_new(group); - if (pub == nullptr) { + ECPointPointer pub(EC_POINT_new(group)); + if (!pub) { env->ThrowError("Failed to allocate EC_POINT for a public key"); - return nullptr; + return pub; } r = EC_POINT_oct2point( group, - pub, + pub.get(), reinterpret_cast(data), len, nullptr); - if (!r) { - EC_POINT_free(pub); - return nullptr; - } + if (!r) + return ECPointPointer(); return pub; } @@ -4443,11 +4334,12 @@ void ECDH::ComputeSecret(const FunctionCallbackInfo& args) { if (!ecdh->IsKeyPairValid()) return env->ThrowError("Invalid key pair"); - EC_POINT* pub = ECDH::BufferToPoint(env, - ecdh->group_, - Buffer::Data(args[0]), - Buffer::Length(args[0])); - if (pub == nullptr) { + ECPointPointer pub( + ECDH::BufferToPoint(env, + ecdh->group_, + Buffer::Data(args[0]), + Buffer::Length(args[0]))); + if (!pub) { args.GetReturnValue().Set( FIXED_ONE_BYTE_STRING(env->isolate(), "ERR_CRYPTO_ECDH_INVALID_PUBLIC_KEY")); @@ -4459,8 +4351,7 @@ void ECDH::ComputeSecret(const FunctionCallbackInfo& args) { size_t out_len = (field_size + 7) / 8; char* out = node::Malloc(out_len); - int r = ECDH_compute_key(out, out_len, pub, ecdh->key_, nullptr); - EC_POINT_free(pub); + int r = ECDH_compute_key(out, out_len, pub.get(), ecdh->key_.get(), nullptr); if (!r) { free(out); return env->ThrowError("Failed to compute ECDH key"); @@ -4480,7 +4371,7 @@ void ECDH::GetPublicKey(const 
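DiffieHellman::ComputeSecret() above keeps the shared secret in a `MallocedBuffer` and left-pads it with zeros when `DH_compute_key()` returns fewer bytes than `DH_size()`. The padding logic in isolation, using `std::vector` in place of the internal buffer type:

```c++
// Sketch: DH_compute_key() may return fewer bytes than DH_size(); shift
// the significant bytes to the end and zero-fill the front so the secret
// is always exactly DH_size(dh) bytes long.
#include <cstring>
#include <memory>
#include <vector>
#include <openssl/bn.h>
#include <openssl/dh.h>

using BignumPointer = std::unique_ptr<BIGNUM, decltype(&BN_free)>;

bool ComputeSharedSecret(DH* dh, const unsigned char* peer, int peer_len,
                         std::vector<unsigned char>* secret) {
  BignumPointer pub(BN_bin2bn(peer, peer_len, nullptr), BN_free);
  if (!pub)
    return false;

  secret->assign(DH_size(dh), 0);
  int size = DH_compute_key(secret->data(), pub.get(), dh);
  if (size < 0)
    return false;

  int pad = static_cast<int>(secret->size()) - size;
  if (pad > 0) {
    memmove(secret->data() + pad, secret->data(), size);
    memset(secret->data(), 0, pad);
  }
  return true;
}
```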
FunctionCallbackInfo& args) { ECDH* ecdh; ASSIGN_OR_RETURN_UNWRAP(&ecdh, args.Holder()); - const EC_POINT* pub = EC_KEY_get0_public_key(ecdh->key_); + const EC_POINT* pub = EC_KEY_get0_public_key(ecdh->key_.get()); if (pub == nullptr) return env->ThrowError("Failed to get ECDH public key"); @@ -4512,7 +4403,7 @@ void ECDH::GetPrivateKey(const FunctionCallbackInfo& args) { ECDH* ecdh; ASSIGN_OR_RETURN_UNWRAP(&ecdh, args.Holder()); - const BIGNUM* b = EC_KEY_get0_private_key(ecdh->key_); + const BIGNUM* b = EC_KEY_get0_private_key(ecdh->key_.get()); if (b == nullptr) return env->ThrowError("Failed to get ECDH private key"); @@ -4538,20 +4429,19 @@ void ECDH::SetPrivateKey(const FunctionCallbackInfo& args) { THROW_AND_RETURN_IF_NOT_BUFFER(env, args[0], "Private key"); - BIGNUM* priv = BN_bin2bn( + BignumPointer priv(BN_bin2bn( reinterpret_cast(Buffer::Data(args[0].As())), Buffer::Length(args[0].As()), - nullptr); - if (priv == nullptr) + nullptr)); + if (!priv) return env->ThrowError("Failed to convert Buffer to BN"); if (!ecdh->IsKeyValidForCurve(priv)) { - BN_free(priv); return env->ThrowError("Private key is not valid for specified curve."); } - int result = EC_KEY_set_private_key(ecdh->key_, priv); - BN_free(priv); + int result = EC_KEY_set_private_key(ecdh->key_.get(), priv.get()); + priv.reset(); if (!result) { return env->ThrowError("Failed to convert BN to a private key"); @@ -4559,28 +4449,24 @@ void ECDH::SetPrivateKey(const FunctionCallbackInfo& args) { // To avoid inconsistency, clear the current public key in-case computing // the new one fails for some reason. - EC_KEY_set_public_key(ecdh->key_, nullptr); + EC_KEY_set_public_key(ecdh->key_.get(), nullptr); MarkPopErrorOnReturn mark_pop_error_on_return; USE(&mark_pop_error_on_return); - const BIGNUM* priv_key = EC_KEY_get0_private_key(ecdh->key_); + const BIGNUM* priv_key = EC_KEY_get0_private_key(ecdh->key_.get()); CHECK_NE(priv_key, nullptr); - EC_POINT* pub = EC_POINT_new(ecdh->group_); - CHECK_NE(pub, nullptr); + ECPointPointer pub(EC_POINT_new(ecdh->group_)); + CHECK(pub); - if (!EC_POINT_mul(ecdh->group_, pub, priv_key, nullptr, nullptr, nullptr)) { - EC_POINT_free(pub); + if (!EC_POINT_mul(ecdh->group_, pub.get(), priv_key, + nullptr, nullptr, nullptr)) { return env->ThrowError("Failed to generate ECDH public key"); } - if (!EC_KEY_set_public_key(ecdh->key_, pub)) { - EC_POINT_free(pub); + if (!EC_KEY_set_public_key(ecdh->key_.get(), pub.get())) return env->ThrowError("Failed to set generated public key"); - } - - EC_POINT_free(pub); } @@ -4594,41 +4480,39 @@ void ECDH::SetPublicKey(const FunctionCallbackInfo& args) { MarkPopErrorOnReturn mark_pop_error_on_return; - EC_POINT* pub = ECDH::BufferToPoint(env, - ecdh->group_, - Buffer::Data(args[0].As()), - Buffer::Length(args[0].As())); - if (pub == nullptr) + ECPointPointer pub( + ECDH::BufferToPoint(env, + ecdh->group_, + Buffer::Data(args[0].As()), + Buffer::Length(args[0].As()))); + if (!pub) return env->ThrowError("Failed to convert Buffer to EC_POINT"); - int r = EC_KEY_set_public_key(ecdh->key_, pub); - EC_POINT_free(pub); + int r = EC_KEY_set_public_key(ecdh->key_.get(), pub.get()); if (!r) return env->ThrowError("Failed to set EC_POINT as the public key"); } -bool ECDH::IsKeyValidForCurve(const BIGNUM* private_key) { - CHECK_NE(group_, nullptr); - CHECK_NE(private_key, nullptr); +bool ECDH::IsKeyValidForCurve(const BignumPointer& private_key) { + CHECK(group_); + CHECK(private_key); // Private keys must be in the range [1, n-1]. 
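ECDH::ComputeSecret() and SetPublicKey() above obtain an `ECPointPointer` from BufferToPoint() and pass `.get()` into the computation. A sketch of the full decode-then-derive sequence, assuming only standard OpenSSL calls:

```c++
// Sketch: derive an ECDH shared secret from a peer's encoded public
// point, with EC_POINT_free() handled by the unique_ptr deleter.
#include <memory>
#include <vector>
#include <openssl/ec.h>
#include <openssl/ecdh.h>

using ECPointPointer = std::unique_ptr<EC_POINT, decltype(&EC_POINT_free)>;

bool ComputeEcdhSecret(EC_KEY* my_key,
                       const unsigned char* peer_oct, size_t peer_len,
                       std::vector<unsigned char>* secret) {
  const EC_GROUP* group = EC_KEY_get0_group(my_key);
  ECPointPointer peer(EC_POINT_new(group), EC_POINT_free);
  if (!peer ||
      EC_POINT_oct2point(group, peer.get(), peer_oct, peer_len, nullptr) != 1)
    return false;  // invalid public key encoding

  // (field size + 7) / 8 bytes holds the shared x-coordinate.
  int field_size = EC_GROUP_get_degree(group);
  secret->resize((field_size + 7) / 8);
  int written = ECDH_compute_key(secret->data(), secret->size(),
                                 peer.get(), my_key, nullptr);
  if (written <= 0)
    return false;
  secret->resize(written);
  return true;
}
```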
// Ref: Section 3.2.1 - http://www.secg.org/sec1-v2.pdf - if (BN_cmp(private_key, BN_value_one()) < 0) { + if (BN_cmp(private_key.get(), BN_value_one()) < 0) { return false; } - BIGNUM* order = BN_new(); - CHECK_NE(order, nullptr); - bool result = EC_GROUP_get_order(group_, order, nullptr) && - BN_cmp(private_key, order) < 0; - BN_free(order); - return result; + BignumPointer order(BN_new()); + CHECK(order); + return EC_GROUP_get_order(group_, order.get(), nullptr) && + BN_cmp(private_key.get(), order.get()) < 0; } bool ECDH::IsKeyPairValid() { MarkPopErrorOnReturn mark_pop_error_on_return; USE(&mark_pop_error_on_return); - return 1 == EC_KEY_check_key(key_); + return 1 == EC_KEY_check_key(key_.get()); } @@ -4637,39 +4521,17 @@ class PBKDF2Request : public AsyncWrap { PBKDF2Request(Environment* env, Local object, const EVP_MD* digest, - int passlen, - char* pass, - int saltlen, - char* salt, - int iter, - int keylen) + MallocedBuffer&& pass, + MallocedBuffer&& salt, + int keylen, + int iteration_count) : AsyncWrap(env, object, AsyncWrap::PROVIDER_PBKDF2REQUEST), digest_(digest), success_(false), - passlen_(passlen), - pass_(pass), - saltlen_(saltlen), - salt_(salt), - keylen_(keylen), - key_(node::Malloc(keylen)), - iter_(iter) { - Wrap(object, this); - } - - ~PBKDF2Request() override { - free(pass_); - pass_ = nullptr; - passlen_ = 0; - - free(salt_); - salt_ = nullptr; - saltlen_ = 0; - - free(key_); - key_ = nullptr; - keylen_ = 0; - - ClearWrap(object()); + pass_(std::move(pass)), + salt_(std::move(salt)), + key_(keylen), + iteration_count_(iteration_count) { } uv_work_t* work_req() { @@ -4689,23 +4551,23 @@ class PBKDF2Request : public AsyncWrap { uv_work_t work_req_; const EVP_MD* digest_; bool success_; - int passlen_; - char* pass_; - int saltlen_; - char* salt_; - int keylen_; - char* key_; - int iter_; + MallocedBuffer pass_; + MallocedBuffer salt_; + MallocedBuffer key_; + int iteration_count_; }; void PBKDF2Request::Work() { success_ = PKCS5_PBKDF2_HMAC( - pass_, passlen_, reinterpret_cast(salt_), saltlen_, - iter_, digest_, keylen_, reinterpret_cast(key_)); - OPENSSL_cleanse(pass_, passlen_); - OPENSSL_cleanse(salt_, saltlen_); + pass_.data, pass_.size, + reinterpret_cast(salt_.data), salt_.size, + iteration_count_, digest_, + key_.size, + reinterpret_cast(key_.data)); + OPENSSL_cleanse(pass_.data, pass_.size); + OPENSSL_cleanse(salt_.data, salt_.size); } @@ -4718,9 +4580,8 @@ void PBKDF2Request::Work(uv_work_t* work_req) { void PBKDF2Request::After(Local (*argv)[2]) { if (success_) { (*argv)[0] = Null(env()->isolate()); - (*argv)[1] = Buffer::New(env(), key_, keylen_).ToLocalChecked(); - key_ = nullptr; - keylen_ = 0; + (*argv)[1] = Buffer::New(env(), key_.release(), key_.size) + .ToLocalChecked(); } else { (*argv)[0] = Exception::Error(env()->pbkdf2_error_string()); (*argv)[1] = Undefined(env()->isolate()); @@ -4749,37 +4610,27 @@ void PBKDF2(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); const EVP_MD* digest = nullptr; - char* pass = nullptr; - char* salt = nullptr; - int passlen = -1; - int saltlen = -1; - double raw_keylen = -1; int keylen = -1; - int iter = -1; + int iteration_count = -1; Local obj; - passlen = Buffer::Length(args[0]); - - pass = node::Malloc(passlen); - memcpy(pass, Buffer::Data(args[0]), passlen); + int passlen = Buffer::Length(args[0]); - saltlen = Buffer::Length(args[1]); + MallocedBuffer pass(passlen); + memcpy(pass.data, Buffer::Data(args[0]), passlen); - salt = node::Malloc(saltlen); - memcpy(salt, 
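PBKDF2Request above stops hand-freeing `pass_`/`salt_`/`key_` by holding them as `MallocedBuffer` members that are moved into the constructor and released into the result `Buffer` on success. The real helper lives elsewhere in Node's sources; the rough shape assumed here is only for illustration:

```c++
// Rough sketch (assumed shape) of a MallocedBuffer-style owner: movable,
// non-copyable, frees with free(), and can release() the allocation when
// another owner (for example a Buffer) takes over.
#include <cstdlib>

template <typename T>
struct MallocedBuffer {
  T* data;
  size_t size;

  explicit MallocedBuffer(size_t n)
      : data(static_cast<T*>(malloc(n * sizeof(T)))), size(n) {}

  MallocedBuffer(MallocedBuffer&& other) noexcept
      : data(other.data), size(other.size) {
    other.data = nullptr;
    other.size = 0;
  }

  MallocedBuffer(const MallocedBuffer&) = delete;
  MallocedBuffer& operator=(const MallocedBuffer&) = delete;

  // Hand the allocation to a new owner; `size` is left untouched so the
  // caller can still read the length after releasing.
  T* release() {
    T* ret = data;
    data = nullptr;
    return ret;
  }

  ~MallocedBuffer() { free(data); }
};
```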
Buffer::Data(args[1]), saltlen); + int saltlen = Buffer::Length(args[1]); - iter = args[2]->Int32Value(); + MallocedBuffer salt(saltlen); + memcpy(salt.data, Buffer::Data(args[1]), saltlen); - raw_keylen = args[3]->NumberValue(); - - keylen = static_cast(raw_keylen); + iteration_count = args[2]->Int32Value(env->context()).FromJust(); + keylen = args[3]->IntegerValue(env->context()).FromJust(); if (args[4]->IsString()) { node::Utf8Value digest_name(env->isolate(), args[4]); digest = EVP_get_digestbyname(*digest_name); if (digest == nullptr) { - free(salt); - free(pass); args.GetReturnValue().Set(-1); return; } @@ -4792,8 +4643,11 @@ void PBKDF2(const FunctionCallbackInfo& args) { obj = env->pbkdf2_constructor_template()-> NewInstance(env->context()).ToLocalChecked(); std::unique_ptr req( - new PBKDF2Request(env, obj, digest, passlen, pass, saltlen, salt, iter, - keylen)); + new PBKDF2Request(env, obj, digest, + std::move(pass), + std::move(salt), + keylen, + iteration_count)); if (args[5]->IsFunction()) { obj->Set(env->context(), env->ondone_string(), args[5]).FromJust(); @@ -4831,11 +4685,6 @@ class RandomBytesRequest : public AsyncWrap { size_(size), data_(data), free_mode_(free_mode) { - Wrap(object, this); - } - - ~RandomBytesRequest() override { - ClearWrap(object()); } uv_work_t* work_req() { @@ -5041,14 +4890,14 @@ void RandomBytesBuffer(const FunctionCallbackInfo& args) { void GetSSLCiphers(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); - SSL_CTX* ctx = SSL_CTX_new(TLS_method()); - CHECK_NE(ctx, nullptr); + SSLCtxPointer ctx(SSL_CTX_new(TLS_method())); + CHECK(ctx); - SSL* ssl = SSL_new(ctx); - CHECK_NE(ssl, nullptr); + SSLPointer ssl(SSL_new(ctx.get())); + CHECK(ssl); Local arr = Array::New(env->isolate()); - STACK_OF(SSL_CIPHER)* ciphers = SSL_get_ciphers(ssl); + STACK_OF(SSL_CIPHER)* ciphers = SSL_get_ciphers(ssl.get()); for (int i = 0; i < sk_SSL_CIPHER_num(ciphers); ++i) { const SSL_CIPHER* cipher = sk_SSL_CIPHER_value(ciphers, i); @@ -5058,9 +4907,6 @@ void GetSSLCiphers(const FunctionCallbackInfo& args) { SSL_CIPHER_get_name(cipher))).FromJust(); } - SSL_free(ssl); - SSL_CTX_free(ctx); - args.GetReturnValue().Set(arr); } @@ -5111,12 +4957,11 @@ void GetCurves(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); const size_t num_curves = EC_get_builtin_curves(nullptr, 0); Local arr = Array::New(env->isolate(), num_curves); - EC_builtin_curve* curves; if (num_curves) { - curves = node::Malloc(num_curves); + std::vector curves(num_curves); - if (EC_get_builtin_curves(curves, num_curves)) { + if (EC_get_builtin_curves(curves.data(), num_curves)) { for (size_t i = 0; i < num_curves; i++) { arr->Set(env->context(), i, @@ -5124,8 +4969,6 @@ void GetCurves(const FunctionCallbackInfo& args) { OBJ_nid2sn(curves[i].nid))).FromJust(); } } - - free(curves); } args.GetReturnValue().Set(arr); @@ -5133,28 +4976,15 @@ void GetCurves(const FunctionCallbackInfo& args) { bool VerifySpkac(const char* data, unsigned int len) { - bool verify_result = false; - EVP_PKEY* pkey = nullptr; - NETSCAPE_SPKI* spki = nullptr; - - spki = NETSCAPE_SPKI_b64_decode(data, len); - if (spki == nullptr) - goto exit; - - pkey = X509_PUBKEY_get(spki->spkac->pubkey); - if (pkey == nullptr) - goto exit; - - verify_result = NETSCAPE_SPKI_verify(spki, pkey) > 0; - - exit: - if (pkey != nullptr) - EVP_PKEY_free(pkey); + NetscapeSPKIPointer spki(NETSCAPE_SPKI_b64_decode(data, len)); + if (!spki) + return false; - if (spki != nullptr) - 
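GetCurves() above swaps a `node::Malloc()`/`free()` pair for a `std::vector<EC_builtin_curve>` sized by the first `EC_get_builtin_curves()` call. The same routine in freestanding form:

```c++
// Sketch: enumerate OpenSSL's built-in EC curves without any manual
// allocation; the vector replaces the old Malloc/free pair.
#include <string>
#include <vector>
#include <openssl/ec.h>
#include <openssl/objects.h>

std::vector<std::string> BuiltinCurveNames() {
  std::vector<std::string> names;
  const size_t num_curves = EC_get_builtin_curves(nullptr, 0);
  if (num_curves == 0)
    return names;

  std::vector<EC_builtin_curve> curves(num_curves);
  if (EC_get_builtin_curves(curves.data(), num_curves)) {
    for (const auto& curve : curves)
      names.push_back(OBJ_nid2sn(curve.nid));  // short name, e.g. "prime256v1"
  }
  return names;
}
```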
NETSCAPE_SPKI_free(spki); + EVPKeyPointer pkey(X509_PUBKEY_get(spki->spkac->pubkey)); + if (!pkey) + return false; - return verify_result; + return NETSCAPE_SPKI_verify(spki.get(), pkey.get()) > 0; } @@ -5176,41 +5006,29 @@ void VerifySpkac(const FunctionCallbackInfo& args) { char* ExportPublicKey(const char* data, int len, size_t* size) { char* buf = nullptr; - EVP_PKEY* pkey = nullptr; - NETSCAPE_SPKI* spki = nullptr; - BIO* bio = BIO_new(BIO_s_mem()); - if (bio == nullptr) - goto exit; + BIOPointer bio(BIO_new(BIO_s_mem())); + if (!bio) + return nullptr; - spki = NETSCAPE_SPKI_b64_decode(data, len); - if (spki == nullptr) - goto exit; + NetscapeSPKIPointer spki(NETSCAPE_SPKI_b64_decode(data, len)); + if (!spki) + return nullptr; - pkey = NETSCAPE_SPKI_get_pubkey(spki); - if (pkey == nullptr) - goto exit; + EVPKeyPointer pkey(NETSCAPE_SPKI_get_pubkey(spki.get())); + if (!pkey) + return nullptr; - if (PEM_write_bio_PUBKEY(bio, pkey) <= 0) - goto exit; + if (PEM_write_bio_PUBKEY(bio.get(), pkey.get()) <= 0) + return nullptr; BUF_MEM* ptr; - BIO_get_mem_ptr(bio, &ptr); + BIO_get_mem_ptr(bio.get(), &ptr); *size = ptr->length; buf = Malloc(*size); memcpy(buf, ptr->data, *size); - exit: - if (pkey != nullptr) - EVP_PKEY_free(pkey); - - if (spki != nullptr) - NETSCAPE_SPKI_free(spki); - - if (bio != nullptr) - BIO_free_all(bio); - return buf; } @@ -5235,19 +5053,15 @@ void ExportPublicKey(const FunctionCallbackInfo& args) { } -const char* ExportChallenge(const char* data, int len) { - NETSCAPE_SPKI* sp = nullptr; - - sp = NETSCAPE_SPKI_b64_decode(data, len); - if (sp == nullptr) +OpenSSLBuffer ExportChallenge(const char* data, int len) { + NetscapeSPKIPointer sp(NETSCAPE_SPKI_b64_decode(data, len)); + if (!sp) return nullptr; unsigned char* buf = nullptr; ASN1_STRING_to_UTF8(&buf, sp->spkac->challenge); - NETSCAPE_SPKI_free(sp); - - return reinterpret_cast(buf); + return OpenSSLBuffer(reinterpret_cast(buf)); } @@ -5261,13 +5075,12 @@ void ExportChallenge(const FunctionCallbackInfo& args) { char* data = Buffer::Data(args[0]); CHECK_NE(data, nullptr); - const char* cert = ExportChallenge(data, len); - if (cert == nullptr) + OpenSSLBuffer cert = ExportChallenge(data, len); + if (!cert) return args.GetReturnValue().SetEmptyString(); - Local outString = Encode(env->isolate(), cert, strlen(cert), BUFFER); - - OPENSSL_free(const_cast(cert)); + Local outString = + Encode(env->isolate(), cert.get(), strlen(cert.get()), BUFFER); args.GetReturnValue().Set(outString); } @@ -5289,19 +5102,16 @@ void ConvertKey(const FunctionCallbackInfo& args) { if (nid == NID_undef) return env->ThrowTypeError("Invalid ECDH curve name"); - EC_GROUP* group = EC_GROUP_new_by_curve_name(nid); + ECGroupPointer group( + EC_GROUP_new_by_curve_name(nid)); if (group == nullptr) return env->ThrowError("Failed to get EC_GROUP"); - EC_POINT* pub = ECDH::BufferToPoint(env, - group, - Buffer::Data(args[0]), - len); - - std::shared_ptr cleanup(nullptr, [group, pub] (...) 
{ - EC_GROUP_free(group); - EC_POINT_free(pub); - }); + ECPointPointer pub( + ECDH::BufferToPoint(env, + group.get(), + Buffer::Data(args[0]), + len)); if (pub == nullptr) return env->ThrowError("Failed to convert Buffer to EC_POINT"); @@ -5309,13 +5119,15 @@ void ConvertKey(const FunctionCallbackInfo& args) { point_conversion_form_t form = static_cast(args[2]->Uint32Value()); - int size = EC_POINT_point2oct(group, pub, form, nullptr, 0, nullptr); + int size = EC_POINT_point2oct( + group.get(), pub.get(), form, nullptr, 0, nullptr); + if (size == 0) return env->ThrowError("Failed to get public key length"); unsigned char* out = node::Malloc(size); - int r = EC_POINT_point2oct(group, pub, form, out, size, nullptr); + int r = EC_POINT_point2oct(group.get(), pub.get(), form, out, size, nullptr); if (r != size) { free(out); return env->ThrowError("Failed to get public key"); diff --git a/src/node_crypto.h b/src/node_crypto.h index df7f47c82ab159..8b78d272df6149 100644 --- a/src/node_crypto.h +++ b/src/node_crypto.h @@ -75,6 +75,32 @@ struct MarkPopErrorOnReturn { ~MarkPopErrorOnReturn() { ERR_pop_to_mark(); } }; +template +struct FunctionDeleter { + void operator()(T* pointer) const { function(pointer); } + typedef std::unique_ptr Pointer; +}; + +template +using DeleteFnPtr = typename FunctionDeleter::Pointer; + +// Define smart pointers for the most commonly used OpenSSL types: +using X509Pointer = DeleteFnPtr; +using BIOPointer = DeleteFnPtr; +using SSLCtxPointer = DeleteFnPtr; +using SSLSessionPointer = DeleteFnPtr; +using SSLPointer = DeleteFnPtr; +using EVPKeyPointer = DeleteFnPtr; +using EVPKeyCtxPointer = DeleteFnPtr; +using EVPMDPointer = DeleteFnPtr; +using RSAPointer = DeleteFnPtr; +using BignumPointer = DeleteFnPtr; +using NetscapeSPKIPointer = DeleteFnPtr; +using ECGroupPointer = DeleteFnPtr; +using ECPointPointer = DeleteFnPtr; +using ECKeyPointer = DeleteFnPtr; +using DHPointer = DeleteFnPtr; + enum CheckResult { CHECK_CERT_REVOKED = 0, CHECK_OK = 1 @@ -87,14 +113,14 @@ extern void UseExtraCaCerts(const std::string& file); class SecureContext : public BaseObject { public: ~SecureContext() override { - FreeCTXMem(); + Reset(); } static void Initialize(Environment* env, v8::Local target); - SSL_CTX* ctx_; - X509* cert_; - X509* issuer_; + SSLCtxPointer ctx_; + X509Pointer cert_; + X509Pointer issuer_; #ifndef OPENSSL_NO_ENGINE bool client_cert_engine_provided_ = false; #endif // !OPENSSL_NO_ENGINE @@ -171,28 +197,16 @@ class SecureContext : public BaseObject { #endif SecureContext(Environment* env, v8::Local wrap) - : BaseObject(env, wrap), - ctx_(nullptr), - cert_(nullptr), - issuer_(nullptr) { - MakeWeak(this); + : BaseObject(env, wrap) { + MakeWeak(); env->isolate()->AdjustAmountOfExternalAllocatedMemory(kExternalSize); } - void FreeCTXMem() { - if (!ctx_) { - return; - } - + inline void Reset() { env()->isolate()->AdjustAmountOfExternalAllocatedMemory(-kExternalSize); - SSL_CTX_free(ctx_); - if (cert_ != nullptr) - X509_free(cert_); - if (issuer_ != nullptr) - X509_free(issuer_); - ctx_ = nullptr; - cert_ = nullptr; - issuer_ = nullptr; + ctx_.reset(); + cert_.reset(); + issuer_.reset(); } }; @@ -215,20 +229,15 @@ class SSLWrap { cert_cb_(nullptr), cert_cb_arg_(nullptr), cert_cb_running_(false) { - ssl_ = SSL_new(sc->ctx_); + ssl_.reset(SSL_new(sc->ctx_.get())); + CHECK(ssl_); env_->isolate()->AdjustAmountOfExternalAllocatedMemory(kExternalSize); - CHECK_NE(ssl_, nullptr); } virtual ~SSLWrap() { DestroySSL(); - if (next_sess_ != nullptr) { - SSL_SESSION_free(next_sess_); - 
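The node_crypto.h hunk above introduces the `FunctionDeleter` helper and the family of `*Pointer` aliases, but the angle-bracket template arguments do not survive in this rendering of the patch. A reconstructed, self-contained version of the idea, with the parameter spelling assumed, plus one usage; because the deleter is a stateless type rather than a stored function pointer, the resulting `unique_ptr` stays the size of a raw pointer:

```c++
// Sketch of the deleter-template idea: wrap a C "free" function in a
// stateless deleter so std::unique_ptr can manage the C object with no
// per-pointer overhead.
#include <memory>
#include <openssl/bio.h>
#include <openssl/x509.h>

template <typename T, void (*function)(T*)>
struct FunctionDeleter {
  void operator()(T* pointer) const { function(pointer); }
  using Pointer = std::unique_ptr<T, FunctionDeleter>;
};

template <typename T, void (*function)(T*)>
using DeleteFnPtr = typename FunctionDeleter<T, function>::Pointer;

using BIOPointer = DeleteFnPtr<BIO, BIO_free_all>;
using X509Pointer = DeleteFnPtr<X509, X509_free>;

int main() {
  // BIO_free_all() runs automatically when `bio` goes out of scope,
  // including on every early-return path.
  BIOPointer bio(BIO_new(BIO_s_mem()));
  if (!bio)
    return 1;
  BIO_printf(bio.get(), "owned by a unique_ptr\n");
  return 0;
}
```

The aliases then read as ordinary value members: `ctx_.reset(...)`, `ctx_.get()`, and a plain boolean test replace the `CHECK_NE(..., nullptr)` plus manual-free pattern seen throughout the .cc hunks.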
next_sess_ = nullptr; - } } - inline SSL* ssl() const { return ssl_; } inline void enable_session_callbacks() { session_callbacks_ = true; } inline bool is_server() const { return kind_ == kServer; } inline bool is_client() const { return kind_ == kClient; } @@ -319,8 +328,8 @@ class SSLWrap { Environment* const env_; Kind kind_; - SSL_SESSION* next_sess_; - SSL* ssl_; + SSLSessionPointer next_sess_; + SSLPointer ssl_; bool session_callbacks_; bool new_session_wait_; @@ -344,10 +353,6 @@ class SSLWrap { class CipherBase : public BaseObject { public: - ~CipherBase() override { - EVP_CIPHER_CTX_free(ctx_); - } - static void Initialize(Environment* env, v8::Local target); protected: @@ -401,11 +406,11 @@ class CipherBase : public BaseObject { auth_tag_set_(false), auth_tag_len_(0), pending_auth_failed_(false) { - MakeWeak(this); + MakeWeak(); } private: - EVP_CIPHER_CTX* ctx_; + DeleteFnPtr ctx_; const CipherKind kind_; bool auth_tag_set_; unsigned int auth_tag_len_; @@ -416,8 +421,6 @@ class CipherBase : public BaseObject { class Hmac : public BaseObject { public: - ~Hmac() override; - static void Initialize(Environment* env, v8::Local target); protected: @@ -432,17 +435,15 @@ class Hmac : public BaseObject { Hmac(Environment* env, v8::Local wrap) : BaseObject(env, wrap), ctx_(nullptr) { - MakeWeak(this); + MakeWeak(); } private: - HMAC_CTX* ctx_; + DeleteFnPtr ctx_; }; class Hash : public BaseObject { public: - ~Hash() override; - static void Initialize(Environment* env, v8::Local target); bool HashInit(const char* hash_type); @@ -457,11 +458,11 @@ class Hash : public BaseObject { : BaseObject(env, wrap), mdctx_(nullptr), finalized_(false) { - MakeWeak(this); + MakeWeak(); } private: - EVP_MD_CTX* mdctx_; + EVPMDPointer mdctx_; bool finalized_; }; @@ -478,19 +479,16 @@ class SignBase : public BaseObject { } Error; SignBase(Environment* env, v8::Local wrap) - : BaseObject(env, wrap), - mdctx_(nullptr) { + : BaseObject(env, wrap) { } - ~SignBase() override; - Error Init(const char* sign_type); Error Update(const char* data, int len); protected: void CheckThrow(Error error); - EVP_MD_CTX* mdctx_; + EVPMDPointer mdctx_; }; class Sign : public SignBase { @@ -512,7 +510,7 @@ class Sign : public SignBase { static void SignFinal(const v8::FunctionCallbackInfo& args); Sign(Environment* env, v8::Local wrap) : SignBase(env, wrap) { - MakeWeak(this); + MakeWeak(); } }; @@ -535,7 +533,7 @@ class Verify : public SignBase { static void VerifyFinal(const v8::FunctionCallbackInfo& args); Verify(Environment* env, v8::Local wrap) : SignBase(env, wrap) { - MakeWeak(this); + MakeWeak(); } }; @@ -571,12 +569,6 @@ class PublicKeyCipher { class DiffieHellman : public BaseObject { public: - ~DiffieHellman() override { - if (dh != nullptr) { - DH_free(dh); - } - } - static void Initialize(Environment* env, v8::Local target); bool Init(int primeLength, int g); @@ -601,9 +593,8 @@ class DiffieHellman : public BaseObject { DiffieHellman(Environment* env, v8::Local wrap) : BaseObject(env, wrap), initialised_(false), - verifyError_(0), - dh(nullptr) { - MakeWeak(this); + verifyError_(0) { + MakeWeak(); } private: @@ -616,30 +607,27 @@ class DiffieHellman : public BaseObject { bool initialised_; int verifyError_; - DH* dh; + DHPointer dh_; }; class ECDH : public BaseObject { public: ~ECDH() override { - if (key_ != nullptr) - EC_KEY_free(key_); - key_ = nullptr; group_ = nullptr; } static void Initialize(Environment* env, v8::Local target); - static EC_POINT* BufferToPoint(Environment* env, - const EC_GROUP* group, - 
char* data, - size_t len); + static ECPointPointer BufferToPoint(Environment* env, + const EC_GROUP* group, + char* data, + size_t len); protected: - ECDH(Environment* env, v8::Local wrap, EC_KEY* key) + ECDH(Environment* env, v8::Local wrap, ECKeyPointer&& key) : BaseObject(env, wrap), - key_(key), - group_(EC_KEY_get0_group(key_)) { - MakeWeak(this); + key_(std::move(key)), + group_(EC_KEY_get0_group(key_.get())) { + MakeWeak(); CHECK_NE(group_, nullptr); } @@ -652,9 +640,9 @@ class ECDH : public BaseObject { static void SetPublicKey(const v8::FunctionCallbackInfo& args); bool IsKeyPairValid(); - bool IsKeyValidForCurve(const BIGNUM* private_key); + bool IsKeyValidForCurve(const BignumPointer& private_key); - EC_KEY* key_; + ECKeyPointer key_; const EC_GROUP* group_; }; diff --git a/src/node_file.cc b/src/node_file.cc index 89c53afc5b197e..97b957eed66b31 100644 --- a/src/node_file.cc +++ b/src/node_file.cc @@ -99,7 +99,7 @@ FileHandle::FileHandle(Environment* env, int fd, Local obj) AsyncWrap::PROVIDER_FILEHANDLE), StreamBase(env), fd_(fd) { - MakeWeak(this); + MakeWeak(); v8::PropertyAttribute attr = static_cast(v8::ReadOnly | v8::DontDelete); object()->DefineOwnProperty(env->context(), diff --git a/src/node_file.h b/src/node_file.h index d6c8aa443c39f4..03e41097d5f12e 100644 --- a/src/node_file.h +++ b/src/node_file.h @@ -28,11 +28,6 @@ class FSReqBase : public ReqWrap { FSReqBase(Environment* env, Local req, AsyncWrap::ProviderType type) : ReqWrap(env, req, type) { - Wrap(object(), this); - } - - virtual ~FSReqBase() { - ClearWrap(object()); } void Init(const char* syscall, @@ -249,10 +244,10 @@ class FileHandle : public AsyncWrap, public StreamBase { env->fdclose_constructor_template() ->NewInstance(env->context()).ToLocalChecked(), AsyncWrap::PROVIDER_FILEHANDLECLOSEREQ) { - Wrap(object(), this); promise_.Reset(env->isolate(), promise); ref_.Reset(env->isolate(), ref); } + ~CloseReq() { uv_fs_req_cleanup(req()); promise_.Reset(); diff --git a/src/node_http2.cc b/src/node_http2.cc index cb5c14a6faa20c..94f774e2d3506d 100644 --- a/src/node_http2.cc +++ b/src/node_http2.cc @@ -244,11 +244,6 @@ Http2Session::Http2Settings::Http2Settings( Init(); } -Http2Session::Http2Settings::~Http2Settings() { - if (!object().IsEmpty()) - ClearWrap(object()); -} - // Generates a Buffer that contains the serialized payload of a SETTINGS // frame. This can be used, for instance, to create the Base64-encoded // content of an Http2-Settings header field. 
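The `src/node_crypto.h` hunk above replaces manually freed OpenSSL handles with `unique_ptr` aliases whose deleters are the matching `*_free` functions. A minimal sketch of that deleter-alias pattern is shown below; the template parameter lists and the `X509` example alias are filled in as assumptions rather than copied verbatim from the patch.

```c++
// Sketch of the FunctionDeleter / DeleteFnPtr pattern introduced in
// src/node_crypto.h above. The template arguments shown are assumptions.
#include <memory>
#include <openssl/x509.h>

template <typename T, void (*function)(T*)>
struct FunctionDeleter {
  void operator()(T* pointer) const { function(pointer); }
  typedef std::unique_ptr<T, FunctionDeleter> Pointer;
};

template <typename T, void (*function)(T*)>
using DeleteFnPtr = typename FunctionDeleter<T, function>::Pointer;

// Example alias: an X509 certificate that frees itself via X509_free().
using X509Pointer = DeleteFnPtr<X509, X509_free>;

void Example(X509* raw) {
  X509Pointer cert(raw);      // takes ownership
  // ... use cert.get() with OpenSSL APIs ...
}                             // X509_free(cert.get()) runs automatically
```

This is why the rest of the patch switches call sites such as `SSL_CTX_sess_set_get_cb` and `EC_POINT_point2oct` to `ctx_.get()`, `group.get()` and `pub.get()`: the raw pointer is still handed to OpenSSL, but its lifetime is owned by the smart pointer.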
@@ -458,7 +453,7 @@ Http2Session::Http2Session(Environment* env, nghttp2_session_type type) : AsyncWrap(env, wrap, AsyncWrap::PROVIDER_HTTP2SESSION), session_type_(type) { - MakeWeak(this); + MakeWeak(); statistics_.start_time = uv_hrtime(); // Capture the configuration options for this session @@ -1668,7 +1663,7 @@ Http2Stream::Http2Stream( session_(session), id_(id), current_headers_category_(category) { - MakeWeak(this); + MakeWeak(); statistics_.start_time = uv_hrtime(); // Limit the number of header pairs @@ -2615,11 +2610,6 @@ Http2Session::Http2Ping::Http2Ping( session_(session), startTime_(uv_hrtime()) { } -Http2Session::Http2Ping::~Http2Ping() { - if (!object().IsEmpty()) - ClearWrap(object()); -} - void Http2Session::Http2Ping::Send(uint8_t* payload) { uint8_t data[8]; if (payload == nullptr) { @@ -2660,7 +2650,7 @@ void Initialize(Local target, Isolate* isolate = env->isolate(); HandleScope scope(isolate); - std::unique_ptr state(new http2_state(isolate)); + std::unique_ptr state(new Http2State(isolate)); #define SET_STATE_TYPEDARRAY(name, field) \ target->Set(context, \ diff --git a/src/node_http2.h b/src/node_http2.h index 8ee4fd450e578d..f4ac926bb54452 100644 --- a/src/node_http2.h +++ b/src/node_http2.h @@ -1148,7 +1148,6 @@ class Http2StreamPerformanceEntry : public PerformanceEntry { class Http2Session::Http2Ping : public AsyncWrap { public: explicit Http2Ping(Http2Session* session); - ~Http2Ping(); size_t self_size() const override { return sizeof(*this); } @@ -1169,7 +1168,6 @@ class Http2Session::Http2Settings : public AsyncWrap { public: explicit Http2Settings(Environment* env); explicit Http2Settings(Http2Session* session); - ~Http2Settings(); size_t self_size() const override { return sizeof(*this); } diff --git a/src/node_http2_state.h b/src/node_http2_state.h index ed88f068a04b16..64a0942f7ffa67 100644 --- a/src/node_http2_state.h +++ b/src/node_http2_state.h @@ -84,9 +84,9 @@ namespace http2 { IDX_SESSION_STATS_COUNT }; -class http2_state { +class Http2State { public: - explicit http2_state(v8::Isolate* isolate) : + explicit Http2State(v8::Isolate* isolate) : root_buffer( isolate, sizeof(http2_state_internal)), diff --git a/src/node_http_parser.cc b/src/node_http_parser.cc index 085f4494e3484e..d6f9b110c3af34 100644 --- a/src/node_http_parser.cc +++ b/src/node_http_parser.cc @@ -151,16 +151,10 @@ class Parser : public AsyncWrap, public StreamListener { : AsyncWrap(env, wrap, AsyncWrap::PROVIDER_HTTPPARSER), current_buffer_len_(0), current_buffer_data_(nullptr) { - Wrap(object(), this); Init(type); } - ~Parser() override { - ClearWrap(object()); - } - - size_t self_size() const override { return sizeof(*this); } diff --git a/src/node_i18n.cc b/src/node_i18n.cc index f491d2191d7b55..5dd30a254ab969 100644 --- a/src/node_i18n.cc +++ b/src/node_i18n.cc @@ -259,7 +259,7 @@ class ConverterObject : public BaseObject, Converter { BaseObject(env, wrap), Converter(converter, sub), ignoreBOM_(ignoreBOM) { - MakeWeak(this); + MakeWeak(); switch (ucnv_getType(converter)) { case UCNV_UTF8: diff --git a/src/node_internals.h b/src/node_internals.h index b8acfa63c26ecf..e5ea575ebc5981 100644 --- a/src/node_internals.h +++ b/src/node_internals.h @@ -244,9 +244,10 @@ v8::Local AddressToJS( template void GetSockOrPeerName(const v8::FunctionCallbackInfo& args) { - T* const wrap = Unwrap(args.Holder()); - if (wrap == nullptr) - return args.GetReturnValue().Set(UV_EBADF); + T* wrap; + ASSIGN_OR_RETURN_UNWRAP(&wrap, + args.Holder(), + args.GetReturnValue().Set(UV_EBADF)); 
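The `GetSockOrPeerName` change above swaps a bare `Unwrap` call for `ASSIGN_OR_RETURN_UNWRAP`, which bails out with the supplied return value (here `UV_EBADF`) when the JS object's internal pointer has already been cleared. For reference, a sketch of the two helpers as they are removed from `src/util-inl.h` and `src/util.h` further down in this patch; the angle-bracketed template arguments are reconstructed assumptions.

```c++
// Sketch only: approximates the Unwrap / ASSIGN_OR_RETURN_UNWRAP definitions
// that this patch removes from src/util.h; template arguments are assumed.
#include <type_traits>
#include "v8.h"

template <typename TypeName>
TypeName* Unwrap(v8::Local<v8::Object> object) {
  // The C++ wrapper object is stored in internal field 0 of the JS object.
  void* pointer = object->GetAlignedPointerFromInternalField(0);
  return static_cast<TypeName*>(pointer);
}

#define ASSIGN_OR_RETURN_UNWRAP(ptr, obj, ...)                                \
  do {                                                                        \
    *ptr =                                                                    \
        Unwrap<typename std::remove_reference<decltype(**ptr)>::type>(obj);   \
    if (*ptr == nullptr)                                                      \
      return __VA_ARGS__; /* e.g. args.GetReturnValue().Set(UV_EBADF) */      \
  } while (0)
```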
CHECK(args[0]->IsObject()); sockaddr_storage storage; int addrlen = sizeof(storage); diff --git a/src/node_serdes.cc b/src/node_serdes.cc index 6ace942c29fd56..520b350199245a 100644 --- a/src/node_serdes.cc +++ b/src/node_serdes.cc @@ -86,7 +86,7 @@ class DeserializerContext : public BaseObject, SerializerContext::SerializerContext(Environment* env, Local wrap) : BaseObject(env, wrap), serializer_(env->isolate(), this) { - MakeWeak(this); + MakeWeak(); } void SerializerContext::ThrowDataCloneError(Local message) { @@ -274,7 +274,7 @@ DeserializerContext::DeserializerContext(Environment* env, deserializer_(env->isolate(), data_, length_, this) { object()->Set(env->context(), env->buffer_string(), buffer).FromJust(); - MakeWeak(this); + MakeWeak(); } MaybeLocal DeserializerContext::ReadHostObject(Isolate* isolate) { diff --git a/src/node_stat_watcher.cc b/src/node_stat_watcher.cc index 41683a0dc1d36c..a2cfb1088c9d25 100644 --- a/src/node_stat_watcher.cc +++ b/src/node_stat_watcher.cc @@ -83,8 +83,7 @@ static void Delete(uv_handle_t* handle) { StatWatcher::StatWatcher(Environment* env, Local wrap) : AsyncWrap(env, wrap, AsyncWrap::PROVIDER_STATWATCHER), watcher_(new uv_fs_poll_t) { - MakeWeak(this); - Wrap(wrap, this); + MakeWeak(); uv_fs_poll_init(env->event_loop(), watcher_); watcher_->data = static_cast(this); } @@ -191,7 +190,7 @@ void StatWatcher::Stop(const FunctionCallbackInfo& args) { void StatWatcher::Stop() { uv_fs_poll_stop(watcher_); - MakeWeak(this); + MakeWeak(); } diff --git a/src/node_trace_events.cc b/src/node_trace_events.cc index f37537d54544a2..0c0699f7be9e1f 100644 --- a/src/node_trace_events.cc +++ b/src/node_trace_events.cc @@ -37,7 +37,7 @@ class NodeCategorySet : public BaseObject { Local wrap, std::set categories) : BaseObject(env, wrap), categories_(categories) { - MakeWeak(this); + MakeWeak(); } bool enabled_ = false; diff --git a/src/node_win32_perfctr_provider.cc b/src/node_win32_perfctr_provider.cc index aac0203d719ebc..6ec3cbd5cfae69 100644 --- a/src/node_win32_perfctr_provider.cc +++ b/src/node_win32_perfctr_provider.cc @@ -119,7 +119,7 @@ PPERF_COUNTERSET_INSTANCE perfctr_instance; namespace node { -EXTERN_C DECLSPEC_SELECTANY HANDLE NodeCounterProvider = nullptr; +HANDLE NodeCounterProvider = nullptr; void InitPerfCountersWin32() { ULONG status; diff --git a/src/node_wrap.h b/src/node_wrap.h index 843fa7151743de..67cea2e715f869 100644 --- a/src/node_wrap.h +++ b/src/node_wrap.h @@ -33,6 +33,8 @@ namespace node { +// TODO(addaleax): Use real inheritance for the JS object templates to avoid +// this unnecessary case switching. #define WITH_GENERIC_UV_STREAM(env, obj, BODY, ELSE) \ do { \ if (env->tcp_constructor_template().IsEmpty() == false && \ diff --git a/src/node_zlib.cc b/src/node_zlib.cc index 67987baf8a375d..ec447638e2ae62 100644 --- a/src/node_zlib.cc +++ b/src/node_zlib.cc @@ -89,8 +89,6 @@ class ZCtx : public AsyncWrap { refs_(0), gzip_id_bytes_read_(0), write_result_(nullptr) { - MakeWeak(this); - Wrap(wrap, this); } @@ -662,7 +660,7 @@ class ZCtx : public AsyncWrap { void Unref() { CHECK_GT(refs_, 0); if (--refs_ == 0) { - MakeWeak(this); + MakeWeak(); } } diff --git a/src/pipe_wrap.cc b/src/pipe_wrap.cc index 0116051b3b6485..da7cb9e3ab55ba 100644 --- a/src/pipe_wrap.cc +++ b/src/pipe_wrap.cc @@ -102,12 +102,7 @@ void PipeWrap::Initialize(Local target, env->set_pipe_constructor_template(t); // Create FunctionTemplate for PipeConnectWrap. 
- auto constructor = [](const FunctionCallbackInfo& args) { - CHECK(args.IsConstructCall()); - ClearWrap(args.This()); - }; - auto cwt = FunctionTemplate::New(env->isolate(), constructor); - cwt->InstanceTemplate()->SetInternalFieldCount(1); + auto cwt = BaseObject::MakeLazilyInitializedJSTemplate(env); AsyncWrap::AddWrapMethods(env, cwt); Local wrapString = FIXED_ONE_BYTE_STRING(env->isolate(), "PipeConnectWrap"); diff --git a/src/stream_base-inl.h b/src/stream_base-inl.h index 392dc2c87c3ca3..cfe0de0872df4a 100644 --- a/src/stream_base-inl.h +++ b/src/stream_base-inl.h @@ -243,12 +243,6 @@ SimpleShutdownWrap::SimpleShutdownWrap( OtherBase(stream->stream_env(), req_wrap_obj, AsyncWrap::PROVIDER_SHUTDOWNWRAP) { - Wrap(req_wrap_obj, static_cast(this)); -} - -template -SimpleShutdownWrap::~SimpleShutdownWrap() { - ClearWrap(static_cast(this)->object()); } inline ShutdownWrap* StreamBase::CreateShutdownWrap( @@ -264,12 +258,6 @@ SimpleWriteWrap::SimpleWriteWrap( OtherBase(stream->stream_env(), req_wrap_obj, AsyncWrap::PROVIDER_WRITEWRAP) { - Wrap(req_wrap_obj, static_cast(this)); -} - -template -SimpleWriteWrap::~SimpleWriteWrap() { - ClearWrap(static_cast(this)->object()); } inline WriteWrap* StreamBase::CreateWriteWrap( @@ -335,8 +323,7 @@ void StreamBase::AddMethods(Environment* env, env->SetProtoMethod(t, "readStart", JSMethod); env->SetProtoMethod(t, "readStop", JSMethod); - if ((flags & kFlagNoShutdown) == 0) - env->SetProtoMethod(t, "shutdown", JSMethod); + env->SetProtoMethod(t, "shutdown", JSMethod); if ((flags & kFlagHasWritev) != 0) env->SetProtoMethod(t, "writev", JSMethod); env->SetProtoMethod(t, @@ -461,7 +448,7 @@ inline void StreamReq::ResetObject(v8::Local obj) { #ifdef DEBUG CHECK_GT(obj->InternalFieldCount(), StreamReq::kStreamReqField); #endif - ClearWrap(obj); + obj->SetAlignedPointerInInternalField(0, nullptr); // BaseObject field. obj->SetAlignedPointerInInternalField(StreamReq::kStreamReqField, nullptr); } diff --git a/src/stream_base.cc b/src/stream_base.cc index 801b7f4b2f4560..3708ffe7b65c0d 100644 --- a/src/stream_base.cc +++ b/src/stream_base.cc @@ -286,12 +286,6 @@ int StreamBase::WriteString(const FunctionCallbackInfo& args) { uv_stream_t* send_handle = nullptr; if (IsIPCPipe() && !send_handle_obj.IsEmpty()) { - // TODO(addaleax): This relies on the fact that HandleWrap comes first - // as a superclass of each individual subclass. - // There are similar assumptions in other places in the code base. - // A better idea would be having all BaseObject's internal pointers - // refer to the BaseObject* itself; this would require refactoring - // throughout the code base but makes Node rely much less on C++ quirks. 
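In the `pipe_wrap.cc` hunk above (and again for `TCPConnectWrap`, `TLSWrap` and `SendWrap` below), a hand-rolled constructor callback whose only job was to null out the instance's internal field is replaced with a single call to `BaseObject::MakeLazilyInitializedJSTemplate(env)`. The helper's body is not part of this patch; judging from the lambdas it replaces, it presumably looks roughly like the following sketch (the exact checks and signature are assumptions).

```c++
// Hedged sketch of BaseObject::MakeLazilyInitializedJSTemplate(), inferred
// from the constructor lambdas removed in pipe_wrap.cc, tcp_wrap.cc,
// tls_wrap.cc and udp_wrap.cc. Not copied from the patch.
v8::Local<v8::FunctionTemplate> MakeLazilyInitializedJSTemplate(
    Environment* env) {
  auto constructor = [](const v8::FunctionCallbackInfo<v8::Value>& args) {
    CHECK(args.IsConstructCall());
    // Leave the BaseObject slot empty; a C++ wrapper attaches to it later.
    args.This()->SetAlignedPointerInInternalField(0, nullptr);
  };
  v8::Local<v8::FunctionTemplate> t = env->NewFunctionTemplate(constructor);
  t->InstanceTemplate()->SetInternalFieldCount(1);
  return t;
}
```

Centralizing this removes the per-class copies of the same lambda along with the `ClearWrap()` helper they depended on.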
HandleWrap* wrap; ASSIGN_OR_RETURN_UNWRAP(&wrap, send_handle_obj, UV_EINVAL); send_handle = reinterpret_cast(wrap->GetHandle()); diff --git a/src/stream_base.h b/src/stream_base.h index 96b8589b06831d..b91cf7df6cf8c4 100644 --- a/src/stream_base.h +++ b/src/stream_base.h @@ -257,8 +257,7 @@ class StreamBase : public StreamResource { public: enum Flags { kFlagNone = 0x0, - kFlagHasWritev = 0x1, - kFlagNoShutdown = 0x2 + kFlagHasWritev = 0x1 }; template @@ -351,7 +350,6 @@ class SimpleShutdownWrap : public ShutdownWrap, public OtherBase { public: SimpleShutdownWrap(StreamBase* stream, v8::Local req_wrap_obj); - ~SimpleShutdownWrap(); AsyncWrap* GetAsyncWrap() override { return this; } size_t self_size() const override { return sizeof(*this); } @@ -362,7 +360,6 @@ class SimpleWriteWrap : public WriteWrap, public OtherBase { public: SimpleWriteWrap(StreamBase* stream, v8::Local req_wrap_obj); - ~SimpleWriteWrap(); AsyncWrap* GetAsyncWrap() override { return this; } size_t self_size() const override { return sizeof(*this) + StorageSize(); } diff --git a/src/stream_pipe.cc b/src/stream_pipe.cc index 8f0263cd9ae99b..617a0129cfea07 100644 --- a/src/stream_pipe.cc +++ b/src/stream_pipe.cc @@ -17,7 +17,7 @@ StreamPipe::StreamPipe(StreamBase* source, StreamBase* sink, Local obj) : AsyncWrap(source->stream_env(), obj, AsyncWrap::PROVIDER_STREAMPIPE) { - MakeWeak(this); + MakeWeak(); CHECK_NE(sink, nullptr); CHECK_NE(source, nullptr); diff --git a/src/tcp_wrap.cc b/src/tcp_wrap.cc index 6158a1c4a424eb..3ccd157159c287 100644 --- a/src/tcp_wrap.cc +++ b/src/tcp_wrap.cc @@ -26,7 +26,6 @@ #include "handle_wrap.h" #include "node_buffer.h" #include "node_internals.h" -#include "node_wrap.h" #include "connect_wrap.h" #include "stream_base-inl.h" #include "stream_wrap.h" @@ -117,12 +116,8 @@ void TCPWrap::Initialize(Local target, env->set_tcp_constructor_template(t); // Create FunctionTemplate for TCPConnectWrap. - auto constructor = [](const FunctionCallbackInfo& args) { - CHECK(args.IsConstructCall()); - ClearWrap(args.This()); - }; - auto cwt = FunctionTemplate::New(env->isolate(), constructor); - cwt->InstanceTemplate()->SetInternalFieldCount(1); + Local cwt = + BaseObject::MakeLazilyInitializedJSTemplate(env); AsyncWrap::AddWrapMethods(env, cwt); Local wrapString = FIXED_ONE_BYTE_STRING(env->isolate(), "TCPConnectWrap"); diff --git a/src/timer_wrap.cc b/src/timer_wrap.cc index 02c0b8166981ab..88377a3e1b88cc 100644 --- a/src/timer_wrap.cc +++ b/src/timer_wrap.cc @@ -115,7 +115,8 @@ class TimerWrap : public HandleWrap { } static void Start(const FunctionCallbackInfo& args) { - TimerWrap* wrap = Unwrap(args.Holder()); + TimerWrap* wrap; + ASSIGN_OR_RETURN_UNWRAP(&wrap, args.Holder()); CHECK(HandleWrap::IsAlive(wrap)); @@ -125,7 +126,8 @@ class TimerWrap : public HandleWrap { } static void Stop(const FunctionCallbackInfo& args) { - TimerWrap* wrap = Unwrap(args.Holder()); + TimerWrap* wrap; + ASSIGN_OR_RETURN_UNWRAP(&wrap, args.Holder()); CHECK(HandleWrap::IsAlive(wrap)); diff --git a/src/tls_wrap.cc b/src/tls_wrap.cc index 71adbdfc52ca36..5d84a10da2e0b1 100644 --- a/src/tls_wrap.cc +++ b/src/tls_wrap.cc @@ -68,15 +68,16 @@ TLSWrap::TLSWrap(Environment* env, shutdown_(false), cycle_depth_(0), eof_(false) { - node::Wrap(object(), this); - MakeWeak(this); + MakeWeak(); // sc comes from an Unwrap. Make sure it was assigned. 
CHECK_NE(sc, nullptr); // We've our own session callbacks - SSL_CTX_sess_set_get_cb(sc_->ctx_, SSLWrap::GetSessionCallback); - SSL_CTX_sess_set_new_cb(sc_->ctx_, SSLWrap::NewSessionCallback); + SSL_CTX_sess_set_get_cb(sc_->ctx_.get(), + SSLWrap::GetSessionCallback); + SSL_CTX_sess_set_new_cb(sc_->ctx_.get(), + SSLWrap::NewSessionCallback); stream->PushStreamListener(this); @@ -117,35 +118,36 @@ void TLSWrap::InitSSL() { crypto::NodeBIO::FromBIO(enc_in_)->AssignEnvironment(env()); crypto::NodeBIO::FromBIO(enc_out_)->AssignEnvironment(env()); - SSL_set_bio(ssl_, enc_in_, enc_out_); + SSL_set_bio(ssl_.get(), enc_in_, enc_out_); // NOTE: This could be overridden in SetVerifyMode - SSL_set_verify(ssl_, SSL_VERIFY_NONE, crypto::VerifyCallback); + SSL_set_verify(ssl_.get(), SSL_VERIFY_NONE, crypto::VerifyCallback); #ifdef SSL_MODE_RELEASE_BUFFERS - long mode = SSL_get_mode(ssl_); // NOLINT(runtime/int) - SSL_set_mode(ssl_, mode | SSL_MODE_RELEASE_BUFFERS); + long mode = SSL_get_mode(ssl_.get()); // NOLINT(runtime/int) + SSL_set_mode(ssl_.get(), mode | SSL_MODE_RELEASE_BUFFERS); #endif // SSL_MODE_RELEASE_BUFFERS - SSL_set_app_data(ssl_, this); - SSL_set_info_callback(ssl_, SSLInfoCallback); + SSL_set_app_data(ssl_.get(), this); + SSL_set_info_callback(ssl_.get(), SSLInfoCallback); #ifdef SSL_CTRL_SET_TLSEXT_SERVERNAME_CB if (is_server()) { - SSL_CTX_set_tlsext_servername_callback(sc_->ctx_, SelectSNIContextCallback); + SSL_CTX_set_tlsext_servername_callback(sc_->ctx_.get(), + SelectSNIContextCallback); } #endif // SSL_CTRL_SET_TLSEXT_SERVERNAME_CB ConfigureSecureContext(sc_); - SSL_set_cert_cb(ssl_, SSLWrap::SSLCertCallback, this); + SSL_set_cert_cb(ssl_.get(), SSLWrap::SSLCertCallback, this); if (is_server()) { - SSL_set_accept_state(ssl_); + SSL_set_accept_state(ssl_.get()); } else if (is_client()) { // Enough space for server response (hello, cert) crypto::NodeBIO::FromBIO(enc_in_)->set_initial(kInitialClientBufferLength); - SSL_set_connect_state(ssl_); + SSL_set_connect_state(ssl_.get()); } else { // Unexpected ABORT(); @@ -343,7 +345,7 @@ Local TLSWrap::GetSSLError(int status, int* err, std::string* msg) { if (ssl_ == nullptr) return Local(); - *err = SSL_get_error(ssl_, status); + *err = SSL_get_error(ssl_.get(), status); switch (*err) { case SSL_ERROR_NONE: case SSL_ERROR_WANT_READ: @@ -396,7 +398,7 @@ void TLSWrap::ClearOut() { char out[kClearOutChunkSize]; int read; for (;;) { - read = SSL_read(ssl_, out, sizeof(out)); + read = SSL_read(ssl_.get(), out, sizeof(out)); if (read <= 0) break; @@ -422,7 +424,7 @@ void TLSWrap::ClearOut() { } } - int flags = SSL_get_shutdown(ssl_); + int flags = SSL_get_shutdown(ssl_.get()); if (!eof_ && flags & SSL_RECEIVED_SHUTDOWN) { eof_ = true; EmitRead(UV_EOF); @@ -470,7 +472,7 @@ bool TLSWrap::ClearIn() { for (i = 0; i < buffers.size(); ++i) { size_t avail = buffers[i].len; char* data = buffers[i].base; - written = SSL_write(ssl_, data, avail); + written = SSL_write(ssl_.get(), data, avail); CHECK(written == -1 || written == static_cast(avail)); if (written == -1) break; @@ -611,7 +613,7 @@ int TLSWrap::DoWrite(WriteWrap* w, int written = 0; for (i = 0; i < count; i++) { - written = SSL_write(ssl_, bufs[i].base, bufs[i].len); + written = SSL_write(ssl_.get(), bufs[i].base, bufs[i].len); CHECK(written == -1 || written == static_cast(bufs[i].len)); if (written == -1) break; @@ -691,8 +693,8 @@ ShutdownWrap* TLSWrap::CreateShutdownWrap(Local req_wrap_object) { int TLSWrap::DoShutdown(ShutdownWrap* req_wrap) { crypto::MarkPopErrorOnReturn 
mark_pop_error_on_return; - if (ssl_ != nullptr && SSL_shutdown(ssl_) == 0) - SSL_shutdown(ssl_); + if (ssl_ && SSL_shutdown(ssl_.get()) == 0) + SSL_shutdown(ssl_.get()); shutdown_ = true; EncOut(); @@ -727,7 +729,7 @@ void TLSWrap::SetVerifyMode(const FunctionCallbackInfo& args) { } // Always allow a connection. We'll reject in javascript. - SSL_set_verify(wrap->ssl_, verify_mode, crypto::VerifyCallback); + SSL_set_verify(wrap->ssl_.get(), verify_mode, crypto::VerifyCallback); } @@ -784,7 +786,7 @@ void TLSWrap::GetServername(const FunctionCallbackInfo& args) { CHECK_NE(wrap->ssl_, nullptr); - const char* servername = SSL_get_servername(wrap->ssl_, + const char* servername = SSL_get_servername(wrap->ssl_.get(), TLSEXT_NAMETYPE_host_name); if (servername != nullptr) { args.GetReturnValue().Set(OneByteString(env->isolate(), servername)); @@ -809,7 +811,7 @@ void TLSWrap::SetServername(const FunctionCallbackInfo& args) { #ifdef SSL_CTRL_SET_TLSEXT_SERVERNAME_CB node::Utf8Value servername(env->isolate(), args[0].As()); - SSL_set_tlsext_host_name(wrap->ssl_, *servername); + SSL_set_tlsext_host_name(wrap->ssl_.get(), *servername); #endif // SSL_CTRL_SET_TLSEXT_SERVERNAME_CB } @@ -873,16 +875,9 @@ void TLSWrap::Initialize(Local target, env->SetMethod(target, "wrap", TLSWrap::Wrap); - auto constructor = [](const FunctionCallbackInfo& args) { - CHECK(args.IsConstructCall()); - args.This()->SetAlignedPointerInInternalField(0, nullptr); - }; - + Local t = BaseObject::MakeLazilyInitializedJSTemplate(env); Local tlsWrapString = FIXED_ONE_BYTE_STRING(env->isolate(), "TLSWrap"); - - auto t = env->NewFunctionTemplate(constructor); - t->InstanceTemplate()->SetInternalFieldCount(1); t->SetClassName(tlsWrapString); Local get_write_queue_size = diff --git a/src/tty_wrap.cc b/src/tty_wrap.cc index 9977738afcbfd5..c5abc6bf9b9b91 100644 --- a/src/tty_wrap.cc +++ b/src/tty_wrap.cc @@ -61,7 +61,7 @@ void TTYWrap::Initialize(Local target, env->SetProtoMethod(t, "ref", HandleWrap::Ref); env->SetProtoMethod(t, "hasRef", HandleWrap::HasRef); - LibuvStreamWrap::AddMethods(env, t, StreamBase::kFlagNoShutdown); + LibuvStreamWrap::AddMethods(env, t); env->SetProtoMethod(t, "getWindowSize", TTYWrap::GetWindowSize); env->SetProtoMethod(t, "setRawMode", SetRawMode); diff --git a/src/udp_wrap.cc b/src/udp_wrap.cc index e02220a8784d74..414fe07eab6da8 100644 --- a/src/udp_wrap.cc +++ b/src/udp_wrap.cc @@ -53,7 +53,6 @@ using AsyncHooks = Environment::AsyncHooks; class SendWrap : public ReqWrap { public: SendWrap(Environment* env, Local req_wrap_obj, bool have_callback); - ~SendWrap(); inline bool have_callback() const; size_t msg_size; size_t self_size() const override { return sizeof(*this); } @@ -67,12 +66,6 @@ SendWrap::SendWrap(Environment* env, bool have_callback) : ReqWrap(env, req_wrap_obj, AsyncWrap::PROVIDER_UDPSENDWRAP), have_callback_(have_callback) { - Wrap(req_wrap_obj, this); -} - - -SendWrap::~SendWrap() { - ClearWrap(object()); } @@ -81,12 +74,6 @@ inline bool SendWrap::have_callback() const { } -static void NewSendWrap(const FunctionCallbackInfo& args) { - CHECK(args.IsConstructCall()); - ClearWrap(args.This()); -} - - UDPWrap::UDPWrap(Environment* env, Local object) : HandleWrap(env, object, @@ -153,8 +140,7 @@ void UDPWrap::Initialize(Local target, // Create FunctionTemplate for SendWrap Local swt = - FunctionTemplate::New(env->isolate(), NewSendWrap); - swt->InstanceTemplate()->SetInternalFieldCount(1); + BaseObject::MakeLazilyInitializedJSTemplate(env); AsyncWrap::AddWrapMethods(env, swt); Local 
sendWrapString = FIXED_ONE_BYTE_STRING(env->isolate(), "SendWrap"); diff --git a/src/util-inl.h b/src/util-inl.h index d07cfea9227fbe..41a22c97efd9c0 100644 --- a/src/util-inl.h +++ b/src/util-inl.h @@ -217,25 +217,6 @@ inline v8::Local OneByteString(v8::Isolate* isolate, length).ToLocalChecked(); } -template -void Wrap(v8::Local object, TypeName* pointer) { - CHECK_EQ(false, object.IsEmpty()); - CHECK_GT(object->InternalFieldCount(), 0); - object->SetAlignedPointerInInternalField(0, pointer); -} - -void ClearWrap(v8::Local object) { - Wrap(object, nullptr); -} - -template -TypeName* Unwrap(v8::Local object) { - CHECK_EQ(false, object.IsEmpty()); - CHECK_GT(object->InternalFieldCount(), 0); - void* pointer = object->GetAlignedPointerFromInternalField(0); - return static_cast(pointer); -} - void SwapBytes16(char* data, size_t nbytes) { CHECK_EQ(nbytes % 2, 0); diff --git a/src/util.h b/src/util.h index c8bad8171e3bc1..7a1c6c109fdbff 100644 --- a/src/util.h +++ b/src/util.h @@ -35,7 +35,6 @@ #include #include // std::function -#include // std::remove_reference namespace node { @@ -84,8 +83,6 @@ NO_RETURN void Abort(); NO_RETURN void Assert(const char* const (*args)[4]); void DumpBacktrace(FILE* fp); -template using remove_reference = std::remove_reference; - #define FIXED_ONE_BYTE_STRING(isolate, string) \ (node::OneByteString((isolate), (string), sizeof(string) - 1)) @@ -135,14 +132,6 @@ template using remove_reference = std::remove_reference; #define UNREACHABLE() ABORT() -#define ASSIGN_OR_RETURN_UNWRAP(ptr, obj, ...) \ - do { \ - *ptr = \ - Unwrap::type>(obj); \ - if (*ptr == nullptr) \ - return __VA_ARGS__; \ - } while (0) - // TAILQ-style intrusive list node. template class ListNode; @@ -250,13 +239,6 @@ inline v8::Local OneByteString(v8::Isolate* isolate, const unsigned char* data, int length = -1); -inline void Wrap(v8::Local object, void* pointer); - -inline void ClearWrap(v8::Local object); - -template -inline TypeName* Unwrap(v8::Local object); - // Swaps bytes in place. nbytes is the number of bytes to swap and must be a // multiple of the word size (checked by function). inline void SwapBytes16(char* data, size_t nbytes); @@ -428,7 +410,6 @@ class BufferValue : public MaybeStackBuffer { // Use this when a variable or parameter is unused in order to explicitly // silence a compiler warning about that. template inline void USE(T&&) {} -} // namespace node // Run a function when exiting the current scope. struct OnScopeLeave { @@ -438,6 +419,37 @@ struct OnScopeLeave { ~OnScopeLeave() { fn_(); } }; +// Simple RAII wrapper for contiguous data that uses malloc()/free(). 
+template +struct MallocedBuffer { + T* data; + size_t size; + + T* release() { + T* ret = data; + data = nullptr; + return ret; + } + + MallocedBuffer() : data(nullptr) {} + explicit MallocedBuffer(size_t size) : data(Malloc(size)), size(size) {} + MallocedBuffer(MallocedBuffer&& other) : data(other.data), size(other.size) { + other.data = nullptr; + } + MallocedBuffer& operator=(MallocedBuffer&& other) { + this->~MallocedBuffer(); + return *new(this) MallocedBuffer(other); + } + ~MallocedBuffer() { + free(data); + } + MallocedBuffer(const MallocedBuffer&) = delete; + MallocedBuffer& operator=(const MallocedBuffer&) = delete; +}; + +} // namespace node + + #endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS #endif // SRC_UTIL_H_ diff --git a/test/.eslintrc.yaml b/test/.eslintrc.yaml index 235b73baf06d49..7c90a5026b27d0 100644 --- a/test/.eslintrc.yaml +++ b/test/.eslintrc.yaml @@ -13,6 +13,7 @@ rules: node-core/prefer-common-expectserror: error node-core/prefer-common-mustnotcall: error node-core/crypto-check: error + node-core/eslint-check: error node-core/inspector-check: error node-core/number-isnan: error ## common module is mandatory in tests diff --git a/test/addons-napi/test_error/test.js b/test/addons-napi/test_error/test.js index f07326c202a569..d4b1d8a971ee09 100644 --- a/test/addons-napi/test_error/test.js +++ b/test/addons-napi/test_error/test.js @@ -61,9 +61,12 @@ assert.throws(() => { }, /^TypeError: type error$/); function testThrowArbitrary(value) { - assert.throws(() => { - test_error.throwArbitrary(value); - }, value); + assert.throws( + () => test_error.throwArbitrary(value), + (err) => { + assert.strictEqual(err, value); + return true; + }); } testThrowArbitrary(42); @@ -71,6 +74,10 @@ testThrowArbitrary({}); testThrowArbitrary([]); testThrowArbitrary(Symbol('xyzzy')); testThrowArbitrary(true); +testThrowArbitrary('ball'); +testThrowArbitrary(undefined); +testThrowArbitrary(null); +testThrowArbitrary(NaN); common.expectsError( () => test_error.throwErrorCode(), diff --git a/test/addons-napi/test_error/test_error.c b/test/addons-napi/test_error/test_error.c index c9ac7a425fae76..52a9ac7956954e 100644 --- a/test/addons-napi/test_error/test_error.c +++ b/test/addons-napi/test_error/test_error.c @@ -1,7 +1,7 @@ #include #include "../common.h" -napi_value checkError(napi_env env, napi_callback_info info) { +static napi_value checkError(napi_env env, napi_callback_info info) { size_t argc = 1; napi_value args[1]; NAPI_CALL(env, napi_get_cb_info(env, info, &argc, args, NULL, NULL)); @@ -15,7 +15,7 @@ napi_value checkError(napi_env env, napi_callback_info info) { return result; } -napi_value throwExistingError(napi_env env, napi_callback_info info) { +static napi_value throwExistingError(napi_env env, napi_callback_info info) { napi_value message; napi_value error; NAPI_CALL(env, napi_create_string_utf8( @@ -25,34 +25,34 @@ napi_value throwExistingError(napi_env env, napi_callback_info info) { return NULL; } -napi_value throwError(napi_env env, napi_callback_info info) { +static napi_value throwError(napi_env env, napi_callback_info info) { NAPI_CALL(env, napi_throw_error(env, NULL, "error")); return NULL; } -napi_value throwRangeError(napi_env env, napi_callback_info info) { +static napi_value throwRangeError(napi_env env, napi_callback_info info) { NAPI_CALL(env, napi_throw_range_error(env, NULL, "range error")); return NULL; } -napi_value throwTypeError(napi_env env, napi_callback_info info) { +static napi_value throwTypeError(napi_env env, napi_callback_info 
info) { NAPI_CALL(env, napi_throw_type_error(env, NULL, "type error")); return NULL; } -napi_value throwErrorCode(napi_env env, napi_callback_info info) { +static napi_value throwErrorCode(napi_env env, napi_callback_info info) { NAPI_CALL(env, napi_throw_error(env, "ERR_TEST_CODE", "Error [error]")); return NULL; } -napi_value throwRangeErrorCode(napi_env env, napi_callback_info info) { +static napi_value throwRangeErrorCode(napi_env env, napi_callback_info info) { NAPI_CALL(env, napi_throw_range_error(env, "ERR_TEST_CODE", "RangeError [range error]")); return NULL; } -napi_value throwTypeErrorCode(napi_env env, napi_callback_info info) { +static napi_value throwTypeErrorCode(napi_env env, napi_callback_info info) { NAPI_CALL(env, napi_throw_type_error(env, "ERR_TEST_CODE", "TypeError [type error]")); @@ -60,7 +60,7 @@ napi_value throwTypeErrorCode(napi_env env, napi_callback_info info) { } -napi_value createError(napi_env env, napi_callback_info info) { +static napi_value createError(napi_env env, napi_callback_info info) { napi_value result; napi_value message; NAPI_CALL(env, napi_create_string_utf8( @@ -69,7 +69,7 @@ napi_value createError(napi_env env, napi_callback_info info) { return result; } -napi_value createRangeError(napi_env env, napi_callback_info info) { +static napi_value createRangeError(napi_env env, napi_callback_info info) { napi_value result; napi_value message; NAPI_CALL(env, napi_create_string_utf8( @@ -78,7 +78,7 @@ napi_value createRangeError(napi_env env, napi_callback_info info) { return result; } -napi_value createTypeError(napi_env env, napi_callback_info info) { +static napi_value createTypeError(napi_env env, napi_callback_info info) { napi_value result; napi_value message; NAPI_CALL(env, napi_create_string_utf8( @@ -87,7 +87,7 @@ napi_value createTypeError(napi_env env, napi_callback_info info) { return result; } -napi_value createErrorCode(napi_env env, napi_callback_info info) { +static napi_value createErrorCode(napi_env env, napi_callback_info info) { napi_value result; napi_value message; napi_value code; @@ -99,7 +99,7 @@ napi_value createErrorCode(napi_env env, napi_callback_info info) { return result; } -napi_value createRangeErrorCode(napi_env env, napi_callback_info info) { +static napi_value createRangeErrorCode(napi_env env, napi_callback_info info) { napi_value result; napi_value message; napi_value code; @@ -113,7 +113,7 @@ napi_value createRangeErrorCode(napi_env env, napi_callback_info info) { return result; } -napi_value createTypeErrorCode(napi_env env, napi_callback_info info) { +static napi_value createTypeErrorCode(napi_env env, napi_callback_info info) { napi_value result; napi_value message; napi_value code; @@ -135,7 +135,7 @@ static napi_value throwArbitrary(napi_env env, napi_callback_info info) { return NULL; } -napi_value Init(napi_env env, napi_value exports) { +static napi_value Init(napi_env env, napi_value exports) { napi_property_descriptor descriptors[] = { DECLARE_NAPI_PROPERTY("checkError", checkError), DECLARE_NAPI_PROPERTY("throwExistingError", throwExistingError), diff --git a/test/cctest/test_node_postmortem_metadata.cc b/test/cctest/test_node_postmortem_metadata.cc index 335f3e85812547..f69df3ed227717 100644 --- a/test/cctest/test_node_postmortem_metadata.cc +++ b/test/cctest/test_node_postmortem_metadata.cc @@ -72,7 +72,11 @@ TEST_F(DebugSymbolsTest, BaseObjectPersistentHandle) { const Argv argv; Env env{handle_scope, argv}; - v8::Local object = v8::Object::New(isolate_); + v8::Local obj_templ = 
v8::ObjectTemplate::New(isolate_); + obj_templ->SetInternalFieldCount(1); + + v8::Local object = + obj_templ->NewInstance(env.context()).ToLocalChecked(); node::BaseObject obj(*env, object); auto expected = reinterpret_cast(&obj.persistent()); diff --git a/test/common/index.js b/test/common/index.js index bbd2b62d7da768..07c0992d65e8d2 100644 --- a/test/common/index.js +++ b/test/common/index.js @@ -494,7 +494,7 @@ exports.fileExists = function(pathname) { exports.skipIfEslintMissing = function() { if (!exports.fileExists( - path.join('..', '..', 'tools', 'node_modules', 'eslint') + path.join(__dirname, '..', '..', 'tools', 'node_modules', 'eslint') )) { exports.skip('missing ESLint'); } @@ -518,6 +518,8 @@ exports.canCreateSymLink = function() { return false; } } + // On non-Windows platforms, this always returns `true` + return true; }; exports.getCallSite = function getCallSite(top) { diff --git a/test/doctool/test-doctool-html.js b/test/doctool/test-doctool-html.js index 91037bfd6501bc..91a18d536a6992 100644 --- a/test/doctool/test-doctool-html.js +++ b/test/doctool/test-doctool-html.js @@ -29,11 +29,11 @@ const testData = [ file: fixtures.path('order_of_end_tags_5873.md'), html: '

ClassMethod: Buffer.from(array) ' + '#

' }, { file: fixtures.path('doc_with_yaml.md'), diff --git a/test/parallel/test-benchmark-util.js b/test/parallel/test-benchmark-util.js index 9a6ae370b7d312..838e51daac26b4 100644 --- a/test/parallel/test-benchmark-util.js +++ b/test/parallel/test-benchmark-util.js @@ -10,6 +10,8 @@ runBenchmark('util', 'method=Array', 'n=1', 'option=none', + 'pos=start', + 'size=1', 'type=', 'version=native'], { NODEJS_BENCHMARK_ZERO_ALLOWED: 1 }); diff --git a/test/parallel/test-buffer-writedouble.js b/test/parallel/test-buffer-writedouble.js index 0dc26dbd3b23f1..8a17d536909dce 100644 --- a/test/parallel/test-buffer-writedouble.js +++ b/test/parallel/test-buffer-writedouble.js @@ -67,10 +67,19 @@ assert.strictEqual(buffer.readDoubleLE(8), -Infinity); buffer.writeDoubleBE(NaN, 0); buffer.writeDoubleLE(NaN, 8); -assert.ok(buffer.equals(new Uint8Array([ - 0x7F, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0x7F -]))); +// JS only knows a single NaN but there exist two platform specific +// implementations. Therefore, allow both quiet and signalling NaNs. +if (buffer[1] === 0xF7) { + assert.ok(buffer.equals(new Uint8Array([ + 0x7F, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0x7F + ]))); +} else { + assert.ok(buffer.equals(new Uint8Array([ + 0x7F, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0x7F + ]))); +} assert.ok(Number.isNaN(buffer.readDoubleBE(0))); assert.ok(Number.isNaN(buffer.readDoubleLE(8))); diff --git a/test/parallel/test-buffer-writefloat.js b/test/parallel/test-buffer-writefloat.js index 77dd4793e5e749..4c2c7539eaafcb 100644 --- a/test/parallel/test-buffer-writefloat.js +++ b/test/parallel/test-buffer-writefloat.js @@ -50,8 +50,18 @@ assert.strictEqual(buffer.readFloatLE(4), -Infinity); buffer.writeFloatBE(NaN, 0); buffer.writeFloatLE(NaN, 4); -assert.ok(buffer.equals( - new Uint8Array([ 0x7F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0xC0, 0x7F ]))); + +// JS only knows a single NaN but there exist two platform specific +// implementations. Therefore, allow both quiet and signalling NaNs. 
+if (buffer[1] === 0xBF) { + assert.ok( + buffer.equals(new Uint8Array( + [ 0x7F, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0x7F ]))); +} else { + assert.ok( + buffer.equals(new Uint8Array( + [ 0x7F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0xC0, 0x7F ]))); +} assert.ok(Number.isNaN(buffer.readFloatBE(0))); assert.ok(Number.isNaN(buffer.readFloatLE(4))); diff --git a/test/parallel/test-child-process-fork-exec-path.js b/test/parallel/test-child-process-fork-exec-path.js index 8b94ef62a93bc8..cabd8893475630 100644 --- a/test/parallel/test-child-process-fork-exec-path.js +++ b/test/parallel/test-child-process-fork-exec-path.js @@ -23,6 +23,7 @@ const common = require('../common'); const assert = require('assert'); const fs = require('fs'); +const { COPYFILE_FICLONE } = fs.constants; const path = require('path'); const tmpdir = require('../common/tmpdir'); const msg = { test: 'this' }; @@ -44,7 +45,7 @@ if (process.env.FORK) { } catch (e) { if (e.code !== 'ENOENT') throw e; } - fs.writeFileSync(copyPath, fs.readFileSync(nodePath)); + fs.copyFileSync(nodePath, copyPath, COPYFILE_FICLONE); fs.chmodSync(copyPath, '0755'); // slow but simple diff --git a/test/parallel/test-cli-node-options.js b/test/parallel/test-cli-node-options.js index 8eae27b1a2a3a2..2383935f4bb7a3 100644 --- a/test/parallel/test-cli-node-options.js +++ b/test/parallel/test-cli-node-options.js @@ -32,7 +32,7 @@ if (!common.isWindows) { expect('--perf-basic-prof', 'B\n'); } -if (common.isLinux && ['arm', 'x64', 'mips'].includes(process.arch)) { +if (common.isLinux && ['arm', 'x64'].includes(process.arch)) { // PerfJitLogger is only implemented in Linux. expect('--perf-prof', 'B\n'); } diff --git a/test/parallel/test-crypto-des3-wrap.js b/test/parallel/test-crypto-des3-wrap.js new file mode 100644 index 00000000000000..75c8cd574fd662 --- /dev/null +++ b/test/parallel/test-crypto-des3-wrap.js @@ -0,0 +1,25 @@ +'use strict'; +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); + +const assert = require('assert'); +const crypto = require('crypto'); + +// Test case for des-ede3 wrap/unwrap. des3-wrap needs extra 2x blocksize +// then plaintext to store ciphertext. +const test = { + key: Buffer.from('3c08e25be22352910671cfe4ba3652b1220a8a7769b490ba', 'hex'), + iv: Buffer.alloc(0), + plaintext: '32|RmVZZkFUVmpRRkp0TmJaUm56ZU9qcnJkaXNNWVNpTTU*|iXmckfRWZBG' + + 'WWELweCBsThSsfUHLeRe0KCsK8ooHgxie0zOINpXxfZi/oNG7uq9JWFVCk70gfzQH8ZU' + + 'JjAfaFg**' +}; + +const cipher = crypto.createCipheriv('des3-wrap', test.key, test.iv); +const ciphertext = cipher.update(test.plaintext, 'utf8'); + +const decipher = crypto.createDecipheriv('des3-wrap', test.key, test.iv); +const msg = decipher.update(ciphertext, 'buffer', 'utf8'); + +assert.strictEqual(msg, test.plaintext); diff --git a/test/parallel/test-env-newprotomethod-remove-unnecessary-prototypes.js b/test/parallel/test-env-newprotomethod-remove-unnecessary-prototypes.js new file mode 100644 index 00000000000000..bcc522cd45ad50 --- /dev/null +++ b/test/parallel/test-env-newprotomethod-remove-unnecessary-prototypes.js @@ -0,0 +1,18 @@ +'use strict'; +require('../common'); + +// This test ensures that unnecessary prototypes are no longer +// being generated by Environment::NewFunctionTemplate. 
+ +const assert = require('assert'); + +[ + process.binding('udp_wrap').UDP.prototype.bind6, + process.binding('tcp_wrap').TCP.prototype.bind6, + process.binding('udp_wrap').UDP.prototype.send6, + process.binding('tcp_wrap').TCP.prototype.bind, + process.binding('udp_wrap').UDP.prototype.close, + process.binding('tcp_wrap').TCP.prototype.open +].forEach((binding, i) => { + assert.strictEqual('prototype' in binding, false, `Test ${i} failed`); +}); diff --git a/test/parallel/test-eslint-alphabetize-errors.js b/test/parallel/test-eslint-alphabetize-errors.js index 220f09d54eb69e..3f2a5b3fd35c8f 100644 --- a/test/parallel/test-eslint-alphabetize-errors.js +++ b/test/parallel/test-eslint-alphabetize-errors.js @@ -1,6 +1,7 @@ 'use strict'; -require('../common'); +const common = require('../common'); +common.skipIfEslintMissing(); const RuleTester = require('../../tools/node_modules/eslint').RuleTester; const rule = require('../../tools/eslint-rules/alphabetize-errors'); diff --git a/test/parallel/test-eslint-buffer-constructor.js b/test/parallel/test-eslint-buffer-constructor.js index 6b9254f9379b06..daa1a527673c1a 100644 --- a/test/parallel/test-eslint-buffer-constructor.js +++ b/test/parallel/test-eslint-buffer-constructor.js @@ -1,6 +1,7 @@ 'use strict'; -require('../common'); +const common = require('../common'); +common.skipIfEslintMissing(); const RuleTester = require('../../tools/node_modules/eslint').RuleTester; const rule = require('../../tools/eslint-rules/buffer-constructor'); diff --git a/test/parallel/test-eslint-documented-errors.js b/test/parallel/test-eslint-documented-errors.js index 50c92acd151215..533b829e78a298 100644 --- a/test/parallel/test-eslint-documented-errors.js +++ b/test/parallel/test-eslint-documented-errors.js @@ -1,6 +1,7 @@ 'use strict'; -require('../common'); +const common = require('../common'); +common.skipIfEslintMissing(); const RuleTester = require('../../tools/node_modules/eslint').RuleTester; const rule = require('../../tools/eslint-rules/documented-errors'); diff --git a/test/parallel/test-eslint-eslint-check.js b/test/parallel/test-eslint-eslint-check.js new file mode 100644 index 00000000000000..46e2a6a4a2ce47 --- /dev/null +++ b/test/parallel/test-eslint-eslint-check.js @@ -0,0 +1,31 @@ +'use strict'; + +const common = require('../common'); + +common.skipIfEslintMissing(); + +const RuleTester = require('../../tools/node_modules/eslint').RuleTester; +const rule = require('../../tools/eslint-rules/eslint-check'); + +const message = 'Please add a skipIfEslintMissing() call to allow this ' + + 'test to be skipped when Node.js is built ' + + 'from a source tarball.'; + +new RuleTester().run('eslint-check', rule, { + valid: [ + 'foo;', + 'require("common")\n' + + 'common.skipIfEslintMissing();\n' + + 'require("../../tools/node_modules/eslint")' + ], + invalid: [ + { + code: 'require("common")\n' + + 'require("../../tools/node_modules/eslint").RuleTester', + errors: [{ message }], + output: 'require("common")\n' + + 'common.skipIfEslintMissing();\n' + + 'require("../../tools/node_modules/eslint").RuleTester' + } + ] +}); diff --git a/test/parallel/test-eslint-inspector-check.js b/test/parallel/test-eslint-inspector-check.js index bdec596f8d128e..ae71c004029771 100644 --- a/test/parallel/test-eslint-inspector-check.js +++ b/test/parallel/test-eslint-inspector-check.js @@ -1,12 +1,13 @@ 'use strict'; -require('../common'); +const common = require('../common'); +common.skipIfEslintMissing(); const RuleTester = 
require('../../tools/node_modules/eslint').RuleTester; const rule = require('../../tools/eslint-rules/inspector-check'); const message = 'Please add a skipIfInspectorDisabled() call to allow this ' + - 'test to be skippped when Node is built ' + + 'test to be skipped when Node is built ' + '\'--without-inspector\'.'; new RuleTester().run('inspector-check', rule, { diff --git a/test/parallel/test-eslint-require-buffer.js b/test/parallel/test-eslint-require-buffer.js index bdc794dd594240..da17d44c7f600d 100644 --- a/test/parallel/test-eslint-require-buffer.js +++ b/test/parallel/test-eslint-require-buffer.js @@ -11,7 +11,7 @@ const ruleTester = new RuleTester({ env: { node: true } }); -const message = "Use const Buffer = require('buffer').Buffer; " + +const message = "Use const { Buffer } = require('buffer'); " + 'at the beginning of this file'; const useStrict = '\'use strict\';\n\n'; diff --git a/test/parallel/test-fixed-queue.js b/test/parallel/test-fixed-queue.js new file mode 100644 index 00000000000000..a50be1309a5ea8 --- /dev/null +++ b/test/parallel/test-fixed-queue.js @@ -0,0 +1,34 @@ +// Flags: --expose-internals +'use strict'; + +require('../common'); + +const assert = require('assert'); +const FixedQueue = require('internal/fixed_queue'); + +{ + const queue = new FixedQueue(); + assert.strictEqual(queue.head, queue.tail); + assert(queue.isEmpty()); + queue.push('a'); + assert(!queue.isEmpty()); + assert.strictEqual(queue.shift(), 'a'); + assert.strictEqual(queue.shift(), null); +} + +{ + const queue = new FixedQueue(); + for (let i = 0; i < 2047; i++) + queue.push('a'); + assert(queue.head.isFull()); + queue.push('a'); + assert(!queue.head.isFull()); + + assert.notStrictEqual(queue.head, queue.tail); + for (let i = 0; i < 2047; i++) + assert.strictEqual(queue.shift(), 'a'); + assert.strictEqual(queue.head, queue.tail); + assert(!queue.isEmpty()); + assert.strictEqual(queue.shift(), 'a'); + assert(queue.isEmpty()); +} diff --git a/test/parallel/test-fs-filehandle.js b/test/parallel/test-fs-filehandle.js index 8ddc11ec3e0e07..84b462aa90914d 100644 --- a/test/parallel/test-fs-filehandle.js +++ b/test/parallel/test-fs-filehandle.js @@ -5,7 +5,7 @@ const common = require('../common'); const assert = require('assert'); const path = require('path'); const fs = process.binding('fs'); -const { stringToFlags } = require('internal/fs'); +const { stringToFlags } = require('internal/fs/utils'); // Verifies that the FileHandle object is garbage collected and that a // warning is emitted if it is not closed. 
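The `MallocedBuffer` helper added to `src/util.h` earlier in this patch is a move-only RAII wrapper around `Malloc()`/`free()`. A short usage sketch follows; the element type, buffer size and the template parameter list (assumed to be `template <typename T>`) are assumptions for illustration.

```c++
// Usage sketch for the MallocedBuffer<T> helper added to src/util.h above.
#include <cstdlib>
#include <cstring>
#include <utility>

void Example() {
  node::MallocedBuffer<char> buf(64);    // allocates 64 bytes via Malloc()
  memset(buf.data, 0, buf.size);

  // Ownership is move-only: after the move, buf.data is nullptr and only
  // `other` will free the allocation in its destructor.
  node::MallocedBuffer<char> other = std::move(buf);

  // release() hands the raw pointer back to the caller, who must free() it;
  // other.data is then nullptr, so its destructor's free() is a no-op.
  char* raw = other.release();
  free(raw);
}
```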
diff --git a/test/parallel/test-fs-open-flags.js b/test/parallel/test-fs-open-flags.js index 7f70885861ffd1..546d68e31274f4 100644 --- a/test/parallel/test-fs-open-flags.js +++ b/test/parallel/test-fs-open-flags.js @@ -39,7 +39,7 @@ const O_DSYNC = fs.constants.O_DSYNC || 0; const O_TRUNC = fs.constants.O_TRUNC || 0; const O_WRONLY = fs.constants.O_WRONLY || 0; -const { stringToFlags } = require('internal/fs'); +const { stringToFlags } = require('internal/fs/utils'); assert.strictEqual(stringToFlags('r'), O_RDONLY); assert.strictEqual(stringToFlags('r+'), O_RDWR); diff --git a/test/parallel/test-fs-promises-file-handle-append-file.js b/test/parallel/test-fs-promises-file-handle-append-file.js index 38336a2b43a57e..7766ac4c904642 100644 --- a/test/parallel/test-fs-promises-file-handle-append-file.js +++ b/test/parallel/test-fs-promises-file-handle-append-file.js @@ -2,11 +2,11 @@ const common = require('../common'); -// The following tests validate base functionality for the fs/promises +// The following tests validate base functionality for the fs.promises // FileHandle.appendFile method. const fs = require('fs'); -const { open } = require('fs/promises'); +const { open } = fs.promises; const path = require('path'); const tmpdir = require('../common/tmpdir'); const assert = require('assert'); diff --git a/test/parallel/test-fs-promises-file-handle-chmod.js b/test/parallel/test-fs-promises-file-handle-chmod.js index c2a44fba7bd32c..8b9d8b1c0d193d 100644 --- a/test/parallel/test-fs-promises-file-handle-chmod.js +++ b/test/parallel/test-fs-promises-file-handle-chmod.js @@ -2,11 +2,11 @@ const common = require('../common'); -// The following tests validate base functionality for the fs/promises +// The following tests validate base functionality for the fs.promises // FileHandle.chmod method. const fs = require('fs'); -const { open } = require('fs/promises'); +const { open } = fs.promises; const path = require('path'); const tmpdir = require('../common/tmpdir'); const assert = require('assert'); diff --git a/test/parallel/test-fs-promises-file-handle-read.js b/test/parallel/test-fs-promises-file-handle-read.js index 5a9bc4558cf15f..a397b0e260aff4 100644 --- a/test/parallel/test-fs-promises-file-handle-read.js +++ b/test/parallel/test-fs-promises-file-handle-read.js @@ -2,11 +2,11 @@ const common = require('../common'); -// The following tests validate base functionality for the fs/promises +// The following tests validate base functionality for the fs.promises // FileHandle.read method. const fs = require('fs'); -const { open } = require('fs/promises'); +const { open } = fs.promises; const path = require('path'); const tmpdir = require('../common/tmpdir'); const assert = require('assert'); diff --git a/test/parallel/test-fs-promises-file-handle-readFile.js b/test/parallel/test-fs-promises-file-handle-readFile.js index 9308c299092714..316fd6581fa446 100644 --- a/test/parallel/test-fs-promises-file-handle-readFile.js +++ b/test/parallel/test-fs-promises-file-handle-readFile.js @@ -2,11 +2,11 @@ const common = require('../common'); -// The following tests validate base functionality for the fs/promises +// The following tests validate base functionality for the fs.promises // FileHandle.readFile method. 
const fs = require('fs'); -const { open } = require('fs/promises'); +const { open } = fs.promises; const path = require('path'); const tmpdir = require('../common/tmpdir'); const assert = require('assert'); diff --git a/test/parallel/test-fs-promises-file-handle-stat.js b/test/parallel/test-fs-promises-file-handle-stat.js new file mode 100644 index 00000000000000..7d44b8e3dae2b7 --- /dev/null +++ b/test/parallel/test-fs-promises-file-handle-stat.js @@ -0,0 +1,24 @@ +'use strict'; + +const common = require('../common'); + +// The following tests validate base functionality for the fs.promises +// FileHandle.stat method. + +const { open } = require('fs').promises; +const path = require('path'); +const tmpdir = require('../common/tmpdir'); +const assert = require('assert'); + +tmpdir.refresh(); +common.crashOnUnhandledRejection(); + +async function validateStat() { + const filePath = path.resolve(tmpdir.path, 'tmp-read-file.txt'); + const fileHandle = await open(filePath, 'w+'); + const stats = await fileHandle.stat(); + assert.ok(stats.mtime instanceof Date); +} + +validateStat() + .then(common.mustCall()); diff --git a/test/parallel/test-fs-promises-file-handle-write.js b/test/parallel/test-fs-promises-file-handle-write.js index b5c83a169021c2..d7812745c5439e 100644 --- a/test/parallel/test-fs-promises-file-handle-write.js +++ b/test/parallel/test-fs-promises-file-handle-write.js @@ -2,11 +2,11 @@ const common = require('../common'); -// The following tests validate base functionality for the fs/promises +// The following tests validate base functionality for the fs.promises // FileHandle.read method. const fs = require('fs'); -const { open } = require('fs/promises'); +const { open } = fs.promises; const path = require('path'); const tmpdir = require('../common/tmpdir'); const assert = require('assert'); diff --git a/test/parallel/test-fs-promises-file-handle-writeFile.js b/test/parallel/test-fs-promises-file-handle-writeFile.js index 196b6f8db8cd58..a53384cc221645 100644 --- a/test/parallel/test-fs-promises-file-handle-writeFile.js +++ b/test/parallel/test-fs-promises-file-handle-writeFile.js @@ -2,11 +2,11 @@ const common = require('../common'); -// The following tests validate base functionality for the fs/promises +// The following tests validate base functionality for the fs.promises // FileHandle.readFile method. 
const fs = require('fs'); -const { open } = require('fs/promises'); +const { open } = fs.promises; const path = require('path'); const tmpdir = require('../common/tmpdir'); const assert = require('assert'); diff --git a/test/parallel/test-fs-promises-readfile.js b/test/parallel/test-fs-promises-readfile.js index 1bf49503c312c0..4334673c30f5fb 100644 --- a/test/parallel/test-fs-promises-readfile.js +++ b/test/parallel/test-fs-promises-readfile.js @@ -4,7 +4,7 @@ const common = require('../common'); const assert = require('assert'); const path = require('path'); -const { writeFile, readFile } = require('fs/promises'); +const { writeFile, readFile } = require('fs').promises; const tmpdir = require('../common/tmpdir'); tmpdir.refresh(); diff --git a/test/parallel/test-fs-promises-writefile.js b/test/parallel/test-fs-promises-writefile.js index 280bce864229ce..1bb6945c6782da 100644 --- a/test/parallel/test-fs-promises-writefile.js +++ b/test/parallel/test-fs-promises-writefile.js @@ -2,7 +2,7 @@ const common = require('../common'); const fs = require('fs'); -const fsPromises = require('fs/promises'); +const fsPromises = fs.promises; const path = require('path'); const tmpdir = require('../common/tmpdir'); const assert = require('assert'); diff --git a/test/parallel/test-fs-promises.js b/test/parallel/test-fs-promises.js index ba3a91d5b39f8a..43b5da700484ff 100644 --- a/test/parallel/test-fs-promises.js +++ b/test/parallel/test-fs-promises.js @@ -5,7 +5,7 @@ const assert = require('assert'); const tmpdir = require('../common/tmpdir'); const fixtures = require('../common/fixtures'); const path = require('path'); -const fsPromises = require('fs/promises'); +const fsPromises = require('fs').promises; const { access, chmod, diff --git a/test/parallel/test-fs-realpath.js b/test/parallel/test-fs-realpath.js index 6cfd79cecfb694..d7efc748bc9662 100644 --- a/test/parallel/test-fs-realpath.js +++ b/test/parallel/test-fs-realpath.js @@ -27,11 +27,10 @@ const tmpdir = require('../common/tmpdir'); const assert = require('assert'); const fs = require('fs'); const path = require('path'); -const exec = require('child_process').exec; let async_completed = 0; let async_expected = 0; const unlink = []; -let skipSymlinks = false; +const skipSymlinks = !common.canCreateSymLink(); const tmpDir = tmpdir.path; tmpdir.refresh(); @@ -45,25 +44,9 @@ if (common.isWindows) { assert .strictEqual(path_left.toLowerCase(), path_right.toLowerCase(), message); }; - - // On Windows, creating symlinks requires admin privileges. - // We'll only try to run symlink test if we have enough privileges. - try { - exec('whoami /priv', function(err, o) { - if (err || !o.includes('SeCreateSymbolicLinkPrivilege')) { - skipSymlinks = true; - } - runTest(); - }); - } catch (er) { - // better safe than sorry - skipSymlinks = true; - process.nextTick(runTest); - } -} else { - process.nextTick(runTest); } +process.nextTick(runTest); function tmp(p) { return path.join(tmpDir, p); diff --git a/test/parallel/test-fs-syncwritestream.js b/test/parallel/test-fs-syncwritestream.js index a014277a6ba259..8fbe665a4047d0 100644 --- a/test/parallel/test-fs-syncwritestream.js +++ b/test/parallel/test-fs-syncwritestream.js @@ -6,8 +6,8 @@ const stream = require('stream'); const fs = require('fs'); const path = require('path'); -// require('internal/fs').SyncWriteStream is used as a stdio implementation -// when stdout/stderr point to files. +// require('internal/fs/utils').SyncWriteStream is used as a stdio +// implementation when stdout/stderr point to files. 
if (process.argv[2] === 'child') { // Note: Calling console.log() is part of this test as it exercises the diff --git a/test/parallel/test-http-agent-keepalive.js b/test/parallel/test-http-agent-keepalive.js index 8ac8d79df192b0..917c4ee8101f9e 100644 --- a/test/parallel/test-http-agent-keepalive.js +++ b/test/parallel/test-http-agent-keepalive.js @@ -92,8 +92,7 @@ function remoteClose() { // waiting remote server close the socket setTimeout(common.mustCall(() => { assert.strictEqual(agent.sockets[name], undefined); - assert.strictEqual(agent.freeSockets[name], undefined, - 'freeSockets is not empty'); + assert.strictEqual(agent.freeSockets[name], undefined); remoteError(); }), common.platformTimeout(200)); })); diff --git a/test/parallel/test-http2-client-destroy.js b/test/parallel/test-http2-client-destroy.js index e641335e751287..6238363511a791 100644 --- a/test/parallel/test-http2-client-destroy.js +++ b/test/parallel/test-http2-client-destroy.js @@ -95,6 +95,7 @@ const Countdown = require('../common/countdown'); }); req.resume(); + req.on('end', common.mustCall()); req.on('close', common.mustCall(() => server.close())); })); } diff --git a/test/parallel/test-http2-client-onconnect-errors.js b/test/parallel/test-http2-client-onconnect-errors.js index f427bfb4907339..a75dc590c669a1 100644 --- a/test/parallel/test-http2-client-onconnect-errors.js +++ b/test/parallel/test-http2-client-onconnect-errors.js @@ -101,6 +101,7 @@ function runTest(test) { }); } + req.on('end', common.mustCall()); req.on('close', common.mustCall(() => { client.destroy(); diff --git a/test/parallel/test-http2-client-stream-destroy-before-connect.js b/test/parallel/test-http2-client-stream-destroy-before-connect.js index 9e81015ec58d28..d834de5d11ebe7 100644 --- a/test/parallel/test-http2-client-stream-destroy-before-connect.js +++ b/test/parallel/test-http2-client-stream-destroy-before-connect.js @@ -45,4 +45,5 @@ server.listen(0, common.mustCall(() => { req.on('response', common.mustNotCall()); req.resume(); + req.on('end', common.mustCall()); })); diff --git a/test/parallel/test-http2-compat-serverresponse-destroy.js b/test/parallel/test-http2-compat-serverresponse-destroy.js index 49822082979a01..8ee52a74ab4e81 100644 --- a/test/parallel/test-http2-compat-serverresponse-destroy.js +++ b/test/parallel/test-http2-compat-serverresponse-destroy.js @@ -63,6 +63,7 @@ server.listen(0, common.mustCall(() => { req.on('close', common.mustCall(() => countdown.dec())); req.resume(); + req.on('end', common.mustCall()); } { @@ -77,5 +78,6 @@ server.listen(0, common.mustCall(() => { req.on('close', common.mustCall(() => countdown.dec())); req.resume(); + req.on('end', common.mustCall()); } })); diff --git a/test/parallel/test-http2-max-concurrent-streams.js b/test/parallel/test-http2-max-concurrent-streams.js index 2b576700aa4e00..b270d6cc6aff31 100644 --- a/test/parallel/test-http2-max-concurrent-streams.js +++ b/test/parallel/test-http2-max-concurrent-streams.js @@ -45,6 +45,7 @@ server.listen(0, common.mustCall(() => { req.on('aborted', common.mustCall()); req.on('response', common.mustNotCall()); req.resume(); + req.on('end', common.mustCall()); req.on('close', common.mustCall(() => countdown.dec())); req.on('error', common.expectsError({ code: 'ERR_HTTP2_STREAM_ERROR', diff --git a/test/parallel/test-http2-misused-pseudoheaders.js b/test/parallel/test-http2-misused-pseudoheaders.js index 0b7becef5f6f0a..c1ae37b9a36938 100644 --- a/test/parallel/test-http2-misused-pseudoheaders.js +++ 
b/test/parallel/test-http2-misused-pseudoheaders.js @@ -41,6 +41,7 @@ server.listen(0, common.mustCall(() => { req.on('response', common.mustCall()); req.resume(); + req.on('end', common.mustCall()); req.on('close', common.mustCall(() => { server.close(); client.close(); diff --git a/test/parallel/test-http2-multi-content-length.js b/test/parallel/test-http2-multi-content-length.js index 908f6ecd64fea1..7d8ff4858fedbb 100644 --- a/test/parallel/test-http2-multi-content-length.js +++ b/test/parallel/test-http2-multi-content-length.js @@ -53,6 +53,7 @@ server.listen(0, common.mustCall(() => { // header to be set for non-payload bearing requests... const req = client.request({ 'content-length': 1 }); req.resume(); + req.on('end', common.mustCall()); req.on('close', common.mustCall(() => countdown.dec())); req.on('error', common.expectsError({ code: 'ERR_HTTP2_STREAM_ERROR', diff --git a/test/parallel/test-http2-respond-file-fd-invalid.js b/test/parallel/test-http2-respond-file-fd-invalid.js index 28d1c0f057dd23..21fcf790b449eb 100644 --- a/test/parallel/test-http2-respond-file-fd-invalid.js +++ b/test/parallel/test-http2-respond-file-fd-invalid.js @@ -40,7 +40,7 @@ server.listen(0, () => { req.on('response', common.mustCall()); req.on('error', common.mustCall(errorCheck)); req.on('data', common.mustNotCall()); - req.on('close', common.mustCall(() => { + req.on('end', common.mustCall(() => { assert.strictEqual(req.rstCode, NGHTTP2_INTERNAL_ERROR); client.close(); server.close(); diff --git a/test/parallel/test-http2-respond-nghttperrors.js b/test/parallel/test-http2-respond-nghttperrors.js index 4adf678b681b09..ad9eee0d59fecc 100644 --- a/test/parallel/test-http2-respond-nghttperrors.js +++ b/test/parallel/test-http2-respond-nghttperrors.js @@ -87,7 +87,7 @@ function runTest(test) { req.resume(); req.end(); - req.on('close', common.mustCall(() => { + req.on('end', common.mustCall(() => { client.close(); if (!tests.length) { diff --git a/test/parallel/test-http2-respond-with-fd-errors.js b/test/parallel/test-http2-respond-with-fd-errors.js index 7e7394d29305cc..3a671a3e36490a 100644 --- a/test/parallel/test-http2-respond-with-fd-errors.js +++ b/test/parallel/test-http2-respond-with-fd-errors.js @@ -95,7 +95,7 @@ function runTest(test) { req.resume(); req.end(); - req.on('close', common.mustCall(() => { + req.on('end', common.mustCall(() => { client.close(); if (!tests.length) { diff --git a/test/parallel/test-http2-server-shutdown-before-respond.js b/test/parallel/test-http2-server-shutdown-before-respond.js index 50b3a5572a58e6..33f224fc69a9d5 100644 --- a/test/parallel/test-http2-server-shutdown-before-respond.js +++ b/test/parallel/test-http2-server-shutdown-before-respond.js @@ -32,5 +32,5 @@ server.on('listening', common.mustCall(() => { })); req.resume(); req.on('data', common.mustNotCall()); - req.on('close', common.mustCall(() => server.close())); + req.on('end', common.mustCall(() => server.close())); })); diff --git a/test/parallel/test-http2-server-socket-destroy.js b/test/parallel/test-http2-server-socket-destroy.js index d631ef032b823b..03afc1957b8af4 100644 --- a/test/parallel/test-http2-server-socket-destroy.js +++ b/test/parallel/test-http2-server-socket-destroy.js @@ -52,4 +52,5 @@ server.on('listening', common.mustCall(() => { req.on('aborted', common.mustCall()); req.resume(); + req.on('end', common.mustCall()); })); diff --git a/test/parallel/test-https-max-headers-count.js b/test/parallel/test-https-max-headers-count.js new file mode 100644 index 
00000000000000..8c099d1e5fb841 --- /dev/null +++ b/test/parallel/test-https-max-headers-count.js @@ -0,0 +1,73 @@ +'use strict'; +const common = require('../common'); +const fixtures = require('../common/fixtures'); + +if (!common.hasCrypto) + common.skip('missing crypto'); + +const assert = require('assert'); +const https = require('https'); + +const serverOptions = { + key: fixtures.readKey('agent1-key.pem'), + cert: fixtures.readKey('agent1-cert.pem') +}; + +let requests = 0; +let responses = 0; + +const headers = {}; +const N = 2000; +for (let i = 0; i < N; ++i) { + headers[`key${i}`] = i; +} + +const maxAndExpected = [ // for server + [50, 50], + [1500, 1500], + [0, N + 2] // Host and Connection +]; +let max = maxAndExpected[requests][0]; +let expected = maxAndExpected[requests][1]; + +const server = https.createServer(serverOptions, common.mustCall((req, res) => { + assert.strictEqual(Object.keys(req.headers).length, expected); + if (++requests < maxAndExpected.length) { + max = maxAndExpected[requests][0]; + expected = maxAndExpected[requests][1]; + server.maxHeadersCount = max; + } + res.writeHead(200, headers); + res.end(); +}, 3)); +server.maxHeadersCount = max; + +server.listen(0, common.mustCall(() => { + const maxAndExpected = [ // for client + [20, 20], + [1200, 1200], + [0, N + 3] // Connection, Date and Transfer-Encoding + ]; + const doRequest = common.mustCall(() => { + const max = maxAndExpected[responses][0]; + const expected = maxAndExpected[responses][1]; + const req = https.request({ + port: server.address().port, + headers: headers, + rejectUnauthorized: false + }, (res) => { + assert.strictEqual(Object.keys(res.headers).length, expected); + res.on('end', () => { + if (++responses < maxAndExpected.length) { + doRequest(); + } else { + server.close(); + } + }); + res.resume(); + }); + req.maxHeadersCount = max; + req.end(); + }, 3); + doRequest(); +})); diff --git a/test/parallel/test-internal-fs-syncwritestream.js b/test/parallel/test-internal-fs-syncwritestream.js index c474d21cb43826..c751baf555d20f 100644 --- a/test/parallel/test-internal-fs-syncwritestream.js +++ b/test/parallel/test-internal-fs-syncwritestream.js @@ -5,7 +5,7 @@ const common = require('../common'); const assert = require('assert'); const fs = require('fs'); const path = require('path'); -const SyncWriteStream = require('internal/fs').SyncWriteStream; +const SyncWriteStream = require('internal/fs/utils').SyncWriteStream; const tmpdir = require('../common/tmpdir'); tmpdir.refresh(); diff --git a/test/parallel/test-internal-fs.js b/test/parallel/test-internal-fs.js index 9bc0a98b099f75..2e47e2a3823a9c 100644 --- a/test/parallel/test-internal-fs.js +++ b/test/parallel/test-internal-fs.js @@ -2,7 +2,7 @@ 'use strict'; const common = require('../common'); -const fs = require('internal/fs'); +const fs = require('internal/fs/utils'); // Valid encodings and no args should not throw. 
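The new `test-https-max-headers-count.js` above exercises header-count limits on both ends of a TLS connection. A smaller sketch of the same `maxHeadersCount` behaviour, shown with plain `http` to avoid certificate setup (the limit of 50 and the header names are illustrative):

```js
'use strict';
// Sketch of maxHeadersCount: incoming headers beyond the server's limit are
// discarded by the parser, so the handler never sees more than `limit` keys.
const http = require('http');
const assert = require('assert');

const limit = 50;
const server = http.createServer((req, res) => {
  assert.ok(Object.keys(req.headers).length <= limit);
  res.end('ok');
});
server.maxHeadersCount = limit;

server.listen(0, () => {
  const headers = {};
  for (let i = 0; i < 200; i++) headers[`x-h${i}`] = String(i);

  const req = http.request({ port: server.address().port, headers }, (res) => {
    res.resume();
    res.on('end', () => server.close());
  });
  req.maxHeadersCount = 0; // 0 disables the limit on the client side
  req.end();
});
```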
fs.assertEncoding(); diff --git a/test/parallel/test-module-loading-globalpaths.js b/test/parallel/test-module-loading-globalpaths.js index 798b7765bb6d5b..284dbb0b3cd564 100644 --- a/test/parallel/test-module-loading-globalpaths.js +++ b/test/parallel/test-module-loading-globalpaths.js @@ -4,6 +4,7 @@ const fixtures = require('../common/fixtures'); const assert = require('assert'); const path = require('path'); const fs = require('fs'); +const { COPYFILE_FICLONE } = fs.constants; const child_process = require('child_process'); const pkgName = 'foo'; const { addLibraryPath } = require('../common/shared-lib-util'); @@ -28,7 +29,7 @@ if (process.argv[2] === 'child') { testExecPath = path.join(prefixBinPath, path.basename(process.execPath)); } const mode = fs.statSync(process.execPath).mode; - fs.writeFileSync(testExecPath, fs.readFileSync(process.execPath)); + fs.copyFileSync(process.execPath, testExecPath, COPYFILE_FICLONE); fs.chmodSync(testExecPath, mode); const runTest = (expectedString, env) => { diff --git a/test/parallel/test-next-tick-fixed-queue-regression.js b/test/parallel/test-next-tick-fixed-queue-regression.js new file mode 100644 index 00000000000000..1fe82d02b10907 --- /dev/null +++ b/test/parallel/test-next-tick-fixed-queue-regression.js @@ -0,0 +1,18 @@ +'use strict'; + +const common = require('../common'); + +// This tests a highly specific regression tied to the FixedQueue size, which +// was introduced in Node.js 9.7.0: https://github.com/nodejs/node/pull/18617 +// More specifically, a nextTick list could potentially end up not fully +// clearing in one run through if exactly 2048 ticks were added after +// microtasks were executed within the nextTick loop. + +process.nextTick(() => { + Promise.resolve(1).then(() => { + for (let i = 0; i < 2047; i++) + process.nextTick(common.mustCall()); + const immediate = setImmediate(common.mustNotCall()); + process.nextTick(common.mustCall(() => clearImmediate(immediate))); + }); +}); diff --git a/test/parallel/test-fs-chdir-errormessage.js b/test/parallel/test-process-chdir-errormessage.js similarity index 100% rename from test/parallel/test-fs-chdir-errormessage.js rename to test/parallel/test-process-chdir-errormessage.js diff --git a/test/parallel/test-stream-duplex-error-write.js b/test/parallel/test-stream-duplex-error-write.js deleted file mode 100644 index 5a80ce5c3e4989..00000000000000 --- a/test/parallel/test-stream-duplex-error-write.js +++ /dev/null @@ -1,24 +0,0 @@ -'use strict'; - -const common = require('../common'); -const { Duplex } = require('stream'); -const { strictEqual } = require('assert'); - -const duplex = new Duplex({ - write(chunk, enc, cb) { - cb(new Error('kaboom')); - }, - read() { - this.push(null); - } -}); - -duplex.on('error', common.mustCall(function() { - strictEqual(this._readableState.errorEmitted, true); - strictEqual(this._writableState.errorEmitted, true); -})); - -duplex.on('end', common.mustNotCall()); - -duplex.end('hello'); -duplex.resume(); diff --git a/test/parallel/test-stream-readable-destroy.js b/test/parallel/test-stream-readable-destroy.js index eecee04294e6fe..026aa8ca1603b8 100644 --- a/test/parallel/test-stream-readable-destroy.js +++ b/test/parallel/test-stream-readable-destroy.js @@ -189,18 +189,3 @@ const { inherits } = require('util'); read.push('hi'); read.on('data', common.mustNotCall()); } - -{ - // double error case - const read = new Readable({ - read() {} - }); - - read.on('close', common.mustCall()); - read.on('error', common.mustCall()); - - 
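The module-loading test above now copies the node binary with `fs.copyFileSync` and the `COPYFILE_FICLONE` flag instead of a read/write round trip. A standalone sketch of that call (copying this script rather than the binary, purely for illustration):

```js
'use strict';
// fs.copyFileSync(src, dest, COPYFILE_FICLONE): request a copy-on-write clone
// where the filesystem supports it, falling back to a regular copy otherwise.
const fs = require('fs');
const path = require('path');
const os = require('os');
const { COPYFILE_FICLONE } = fs.constants;

const src = __filename; // the test copies process.execPath instead
const dest = path.join(os.tmpdir(), path.basename(src));

fs.copyFileSync(src, dest, COPYFILE_FICLONE);
fs.chmodSync(dest, fs.statSync(src).mode); // preserve the original file mode
console.log(`copied ${src} -> ${dest}`);
```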
read.destroy(new Error('kaboom 1')); - read.destroy(new Error('kaboom 2')); - assert.strictEqual(read._readableState.errorEmitted, true); - assert.strictEqual(read.destroyed, true); -} diff --git a/test/parallel/test-trace-events-fs-sync.js b/test/parallel/test-trace-events-fs-sync.js index 5bd9b99ba3fedf..60ab5efa3a1d3d 100644 --- a/test/parallel/test-trace-events-fs-sync.js +++ b/test/parallel/test-trace-events-fs-sync.js @@ -9,21 +9,8 @@ const traceFile = 'node_trace.1.log'; let gid = 1; let uid = 1; -let skipSymlinks = false; -// On Windows, creating symlinks requires admin privileges. -// We'll check if we have enough privileges. -if (common.isWindows) { - try { - const o = cp.execSync('whoami /priv'); - if (!o.includes('SeCreateSymbolicLinkPrivilege')) { - skipSymlinks = true; - } - } catch (er) { - // better safe than sorry - skipSymlinks = true; - } -} else { +if (!common.isWindows) { gid = process.getgid(); uid = process.getuid(); } @@ -111,7 +98,7 @@ tests['fs.sync.write'] = 'fs.writeFileSync("fs.txt", "123", "utf8");' + // On windows, we need permissions to test symlink and readlink. // We'll only try to run these tests if we have enough privileges. -if (!skipSymlinks) { +if (common.canCreateSymLink()) { tests['fs.sync.symlink'] = 'fs.writeFileSync("fs.txt", "123", "utf8");' + 'fs.symlinkSync("fs.txt", "linkx");' + 'fs.unlinkSync("linkx");' + diff --git a/test/parallel/test-url-format-whatwg.js b/test/parallel/test-url-format-whatwg.js index 26cef6063c212f..e5c3e369e80390 100644 --- a/test/parallel/test-url-format-whatwg.js +++ b/test/parallel/test-url-format-whatwg.js @@ -111,3 +111,8 @@ assert.strictEqual( url.format(myURL, { unicode: 0 }), 'http://xn--lck1c3crb1723bpq4a.com/a?a=b#c' ); + +assert.strictEqual( + url.format(new URL('http://xn--0zwm56d.com:8080/path'), { unicode: true }), + 'http://测试.com:8080/path' +); diff --git a/test/pseudo-tty/test-tty-stdin-end.js b/test/pseudo-tty/test-tty-stdin-end.js new file mode 100644 index 00000000000000..c78f58446d03e9 --- /dev/null +++ b/test/pseudo-tty/test-tty-stdin-end.js @@ -0,0 +1,7 @@ +'use strict'; +require('../common'); + +// This test ensures that Node.js doesn't crash on `process.stdin.emit("end")`. +// https://github.com/nodejs/node/issues/1068 + +process.stdin.emit('end'); diff --git a/test/pseudo-tty/test-tty-stdin-end.out b/test/pseudo-tty/test-tty-stdin-end.out new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/test/sequential/test-async-wrap-getasyncid.js b/test/sequential/test-async-wrap-getasyncid.js index 64c4fd5cd8ab50..971296915ceecb 100644 --- a/test/sequential/test-async-wrap-getasyncid.js +++ b/test/sequential/test-async-wrap-getasyncid.js @@ -3,7 +3,7 @@ const common = require('../common'); const assert = require('assert'); const fs = require('fs'); -const fsPromises = require('fs/promises'); +const fsPromises = fs.promises; const net = require('net'); const providers = Object.assign({}, process.binding('async_wrap').Providers); const fixtures = require('../common/fixtures'); diff --git a/tools/doc/html.js b/tools/doc/html.js index 439fc057012ca7..7712290b4c61ed 100644 --- a/tools/doc/html.js +++ b/tools/doc/html.js @@ -93,7 +93,7 @@ function render(opts, cb) { filename = path.basename(filename, '.md'); parseText(lexed); - lexed = parseLists(lexed); + lexed = preprocessElements(lexed); // Generate the table of contents. // This mutates the lexed contents in-place. 
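The assertion added to `test-url-format-whatwg.js` above relies on `url.format(URL, options)` rendering the host as Unicode. A self-contained illustration of both settings of the `unicode` option:

```js
'use strict';
// url.format() with a WHATWG URL object: `unicode: true` converts a punycode
// host back to its Unicode form; `unicode: false` keeps the ASCII serialization.
const url = require('url');
const assert = require('assert');

const myURL = new URL('http://xn--0zwm56d.com:8080/path');

assert.strictEqual(url.format(myURL, { unicode: true }),
                   'http://测试.com:8080/path');
assert.strictEqual(url.format(myURL, { unicode: false }),
                   'http://xn--0zwm56d.com:8080/path');
console.log('url.format unicode option behaves as asserted in the test');
```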
@@ -168,6 +168,7 @@ function altDocs(filename) { } const versions = [ + { num: '10.x' }, { num: '9.x' }, { num: '8.x', lts: true }, { num: '7.x' }, @@ -231,25 +232,28 @@ function parseText(lexed) { }); } -// Just update the list item text in-place. -// Lists that come right after a heading are what we're after. -function parseLists(input) { +// Preprocess stability blockquotes and YAML blocks. +function preprocessElements(input) { var state = null; - const savedState = []; - var depth = 0; const output = []; let headingIndex = -1; let heading = null; output.links = input.links; input.forEach(function(tok, index) { + if (tok.type === 'heading') { + headingIndex = index; + heading = tok; + } + if (tok.type === 'html' && common.isYAMLBlock(tok.text)) { + tok.text = parseYAML(tok.text); + } if (tok.type === 'blockquote_start') { - savedState.push(state); state = 'MAYBE_STABILITY_BQ'; return; } if (tok.type === 'blockquote_end' && state === 'MAYBE_STABILITY_BQ') { - state = savedState.pop(); + state = null; return; } if ((tok.type === 'paragraph' && state === 'MAYBE_STABILITY_BQ') || @@ -271,50 +275,7 @@ function parseLists(input) { return; } else if (state === 'MAYBE_STABILITY_BQ') { output.push({ type: 'blockquote_start' }); - state = savedState.pop(); - } - } - if (state === null || - (state === 'AFTERHEADING' && tok.type === 'heading')) { - if (tok.type === 'heading') { - headingIndex = index; - heading = tok; - state = 'AFTERHEADING'; - } - output.push(tok); - return; - } - if (state === 'AFTERHEADING') { - if (tok.type === 'list_start') { - state = 'LIST'; - if (depth === 0) { - output.push({ type: 'html', text: '
' }); - } - depth++; - output.push(tok); - return; - } - if (tok.type === 'html' && common.isYAMLBlock(tok.text)) { - tok.text = parseYAML(tok.text); - } - state = null; - output.push(tok); - return; - } - if (state === 'LIST') { - if (tok.type === 'list_start') { - depth++; - output.push(tok); - return; - } - if (tok.type === 'list_end') { - depth--; - output.push(tok); - if (depth === 0) { - state = null; - output.push({ type: 'html', text: '
' }); - } - return; + state = null; } } output.push(tok); diff --git a/tools/eslint-rules/eslint-check.js b/tools/eslint-rules/eslint-check.js new file mode 100644 index 00000000000000..00a5234733ec05 --- /dev/null +++ b/tools/eslint-rules/eslint-check.js @@ -0,0 +1,60 @@ +/** + * @fileoverview Check that common.skipIfEslintMissing is used if + * the eslint module is required. + */ +'use strict'; + +const utils = require('./rules-utils.js'); + +//------------------------------------------------------------------------------ +// Rule Definition +//------------------------------------------------------------------------------ +const msg = 'Please add a skipIfEslintMissing() call to allow this test to ' + + 'be skipped when Node.js is built from a source tarball.'; + +module.exports = function(context) { + const missingCheckNodes = []; + var commonModuleNode = null; + var hasEslintCheck = false; + + function testEslintUsage(context, node) { + if (utils.isRequired(node, ['../../tools/node_modules/eslint'])) { + missingCheckNodes.push(node); + } + + if (utils.isCommonModule(node)) { + commonModuleNode = node; + } + } + + function checkMemberExpression(context, node) { + if (utils.usesCommonProperty(node, ['skipIfEslintMissing'])) { + hasEslintCheck = true; + } + } + + function reportIfMissing(context) { + if (!hasEslintCheck) { + missingCheckNodes.forEach((node) => { + context.report({ + node, + message: msg, + fix: (fixer) => { + if (commonModuleNode) { + return fixer.insertTextAfter( + commonModuleNode, + '\ncommon.skipIfEslintMissing();' + ); + } + } + }); + }); + } + } + + return { + 'CallExpression': (node) => testEslintUsage(context, node), + 'MemberExpression': (node) => checkMemberExpression(context, node), + 'Program:exit': (node) => reportIfMissing(context, node) + }; +}; diff --git a/tools/eslint-rules/inspector-check.js b/tools/eslint-rules/inspector-check.js index 00a2dd02963558..189b023efc6195 100644 --- a/tools/eslint-rules/inspector-check.js +++ b/tools/eslint-rules/inspector-check.js @@ -11,7 +11,7 @@ const utils = require('./rules-utils.js'); // Rule Definition //------------------------------------------------------------------------------ const msg = 'Please add a skipIfInspectorDisabled() call to allow this ' + - 'test to be skippped when Node is built \'--without-inspector\'.'; + 'test to be skipped when Node is built \'--without-inspector\'.'; module.exports = function(context) { const missingCheckNodes = []; diff --git a/tools/test.py b/tools/test.py index a0a3121e486b3e..66b9c7291f36af 100755 --- a/tools/test.py +++ b/tools/test.py @@ -1747,7 +1747,7 @@ def DoSkip(case): timed_tests.sort(lambda a, b: a.CompareTime(b)) index = 1 for entry in timed_tests[:20]: - t = FormatTime(entry.duration) + t = FormatTime(entry.duration.total_seconds()) sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel())) index += 1
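Many of the http2 test hunks earlier in this patch add `req.on('end', common.mustCall())` after `req.resume()`, or move assertions from the `'close'` event to `'end'`. A minimal stand-alone sketch of that pattern, using a plain `PassThrough` stream in place of an http2 stream and an inline stand-in for the test suite's `common.mustCall()` helper:

```js
'use strict';
// The pattern: resume the readable side so data is consumed, then require
// that 'end' is emitted before the process exits.
const assert = require('assert');
const { PassThrough } = require('stream');

// Tiny stand-in for common.mustCall(): fails at exit if never invoked.
function mustCall(fn = () => {}) {
  let called = false;
  process.on('exit', () => assert.ok(called, 'expected listener was not called'));
  return (...args) => { called = true; return fn(...args); };
}

const req = new PassThrough(); // stands in for a ClientHttp2Stream
req.resume();                  // put the stream into flowing mode
req.on('end', mustCall());     // 'end' must fire once the data is drained
req.end();                     // finish the writable side; 'end' follows
```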